repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/chromatic/solar_wind.py | # -*- coding: utf-8 -*-
import os
import numpy as np
import scipy.stats as sps
import scipy.special as spsf
from enterprise import constants as const
from enterprise.signals import (deterministic_signals, gp_signals, parameter,
signal_base, utils)
from .. import gp_kernels as gpk
defpath = os.path.dirname(__file__)
yr_in_sec = 365.25*24*3600
@signal_base.function
def solar_wind(toas, freqs, planetssb, sunssb, pos_t,
n_earth=5, n_earth_bins=None,
t_init=None, t_final=None):
"""
Construct DM-Solar Model fourier design matrix.
:param toas: vector of time series in seconds
:param planetssb: solar system barycenter positions
:param pos_t: pulsar position as 3-vector
:param freqs: radio frequencies of observations [MHz]
:param n_earth: The electron density from the solar wind at 1 AU.
:param n_earth_bins: Number of binned values of n_earth for which to fit, or
an array or list of bin edges to use for the binned n_earth values.
In the latter case the first and last edges must encompass all
TOAs, and in all cases the number of bins must match the size (number of
elements) of n_earth.
:param t_init: Initial time of earliest TOA in entire dataset, including
all pulsars.
:param t_final: Final time of last TOA in entire dataset, including all
pulsars.
:return dt_DM: Chromatic time delay due to solar wind
"""
if n_earth_bins is None:
theta, R_earth, _, _ = theta_impact(planetssb, sunssb, pos_t)
dm_sol_wind = dm_solar(n_earth, theta, R_earth)
dt_DM = (dm_sol_wind) * 4.148808e3 / freqs**2
else:
if isinstance(n_earth_bins, int) and (t_init is None
or t_final is None):
err_msg = 'Need to enter t_init and t_final '
err_msg += 'to make binned n_earth values.'
raise ValueError(err_msg)
elif isinstance(n_earth_bins, int):
edges, step = np.linspace(t_init, t_final, n_earth_bins,
endpoint=True, retstep=True)
elif isinstance(n_earth_bins, list) or isinstance(n_earth_bins,
np.ndarray):
edges = n_earth_bins
dt_DM = []
for ii, bin in enumerate(edges[:-1]):
bin_mask = np.logical_and(toas >= bin, toas <= edges[ii + 1])
theta, R_earth, _, _ = theta_impact(planetssb,
sunssb,
pos_t)
dm_sol_wind = dm_solar(n_earth[ii], theta[bin_mask],
R_earth[bin_mask])
if dm_sol_wind.size != 0:
dt_DM.extend((dm_sol_wind)
* 4.148808e3 / freqs[bin_mask]**2)
else:
pass
dt_DM = np.array(dt_DM)
if dt_DM.size!=toas.size:
err_msg = 'dt_DM ({0}) does not '.format(dt_DM.size)
err_msg +='match number of TOAs ({0})!!!'.format(toas.size)
raise ValueError(err_msg)
return dt_DM
@signal_base.function
def solar_wind_r_to_p(toas, freqs, planetssb, sunssb, pos_t,
n_earth=5, power=4.39, log10_ne=False):
"""
Construct DM-Solar Model fourier design matrix.
:param toas: vector of time series in seconds
:param planetssb: solar system barycenter positions
:param pos_t: pulsar position as 3-vector
:param freqs: radio frequencies of observations [MHz]
:param n_earth: The electron density from the solar wind at 1 AU.
:param power: Negative power of the density profile for the solar wind, r^-p.
:param log10_ne: Whether the provided value is log10 of the electron
density for this term. Setting this to True is suggested when the power is
much larger than normal.
:return dt_DM: Chromatic time delay due to solar wind
"""
if log10_ne:
n_earth = 10**n_earth
theta, _, b, z_earth = theta_impact(planetssb, sunssb, pos_t)
dm_sol_wind = dm_solar_r_to_p(n_earth, theta, b, z_earth, power)
dt_DM = (dm_sol_wind) * 4.148808e3 / freqs**2
dt_DM = np.array(dt_DM)
return dt_DM
# linear interpolation basis in time with nu^-2 scaling
@signal_base.function
def linear_interp_basis_sw_dm(toas, freqs, planetssb, sunssb,
pos_t, dt=7*86400):
# get linear interpolation basis in time
U, avetoas = utils.linear_interp_basis(toas, dt=dt)
# scale with radio frequency
theta, R_earth, _, _ = theta_impact(planetssb, sunssb, pos_t)
dm_sol_wind = dm_solar(1.0, theta, R_earth)
dt_DM = dm_sol_wind * 4.148808e3 / (freqs**2)
return U * dt_DM[:, None], avetoas
@signal_base.function
def createfourierdesignmatrix_solar_dm(toas, freqs, planetssb, sunssb, pos_t,
modes=None, nmodes=30,
Tspan=None, logf=True, fmin=None,
fmax=None):
"""
Construct DM-Solar Model fourier design matrix.
:param toas: vector of time series in seconds
:param planetssb: solar system barycenter positions
:param pos_t: pulsar position as 3-vector
:param nmodes: number of fourier coefficients to use
:param freqs: radio frequencies of observations [MHz]
:param Tspan: option to use a time span other than the pulsar's observation span when setting the sampling frequencies
:param logf: use log frequency spacing
:param fmin: lower sampling frequency
:param fmax: upper sampling frequency
:return: F: SW DM-variation fourier design matrix
:return: f: Sampling frequencies
"""
# get base fourier design matrix and frequencies
F, Ffreqs = utils.createfourierdesignmatrix_red(toas, nmodes=nmodes,
modes=modes,
Tspan=Tspan, logf=logf,
fmin=fmin, fmax=fmax)
theta, R_earth, _, _ = theta_impact(planetssb, sunssb, pos_t)
dm_sol_wind = dm_solar(1.0, theta, R_earth)
dt_DM = dm_sol_wind * 4.148808e3 /(freqs**2)
return F * dt_DM[:, None], Ffreqs
def solar_wind_block(n_earth=None, ACE_prior=False, include_swgp=True,
swgp_prior=None, swgp_basis=None, Tspan=None):
"""
Returns a solar wind DM noise model; the best model from Hazboun et al. (in prep).
Contains a single mean electron density with an auxiliary perturbation
modeled using a Gaussian process. The GP has common prior parameters
between all pulsars, but the realizations are different for all pulsars.
Solar Wind DM noise modeled as a power-law with 30 sampling frequencies
:param n_earth:
Solar electron density at 1 AU.
:param ACE_prior:
Whether to use the ACE SWEPAM data as an astrophysical prior.
:param swgp_prior:
Prior function for solar wind Gaussian process. Default is a power law.
:param swgp_basis:
Basis to be used for solar wind Gaussian process.
Options include ['powerlaw', 'periodic', 'sq_exp']
:param Tspan:
Sets frequency sampling f_i = i / Tspan. By default the overall time span of
the individual pulsar is used, with 15 frequencies (1/Tspan, ..., 15/Tspan).
"""
if n_earth is None and not ACE_prior:
n_earth = parameter.Uniform(0, 30)('n_earth')
elif n_earth is None and ACE_prior:
n_earth = ACE_SWEPAM_Parameter()('n_earth')
else:
pass
deter_sw = solar_wind(n_earth=n_earth)
mean_sw = deterministic_signals.Deterministic(deter_sw, name='n_earth')
sw_model = mean_sw
if include_swgp:
if swgp_basis == 'powerlaw':
# dm noise parameters that are common
log10_A_sw = parameter.Uniform(-10, 1)
gamma_sw = parameter.Uniform(-2, 1)
sw_prior = utils.powerlaw(log10_A=log10_A_sw, gamma=gamma_sw)
if Tspan is not None:
freqs = np.linspace(1/Tspan, 30/Tspan, 30)
freqs = freqs[1/freqs > 1.5*yr_in_sec]
sw_basis = createfourierdesignmatrix_solar_dm(modes=freqs)
else:
sw_basis = createfourierdesignmatrix_solar_dm(nmodes=15,
Tspan=Tspan)
elif swgp_basis == 'periodic':
# Periodic GP kernel for DM
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
log10_p = parameter.Uniform(-4, 1)
log10_gam_p = parameter.Uniform(-3, 2)
sw_basis = gpk.linear_interp_basis_dm(dt=6*86400)
sw_prior = gpk.periodic_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell,
log10_gam_p=log10_gam_p,
log10_p=log10_p)
elif swgp_basis == 'sq_exp':
# squared-exponential GP kernel for DM
log10_sigma = parameter.Uniform(-10, -4)
log10_ell = parameter.Uniform(1, 4)
sw_basis = gpk.linear_interp_basis_dm(dt=6*86400)
sw_prior = gpk.se_dm_kernel(log10_sigma=log10_sigma,
log10_ell=log10_ell)
gp_sw = gp_signals.BasisGP(sw_prior, sw_basis, name='gp_sw')
sw_model += gp_sw
return sw_model
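# Usage sketch (not part of the original module): how the block above is
# typically attached to a minimal enterprise model for a single pulsar.
# `psr` is an assumed enterprise.Pulsar instance.
def _solar_wind_model_sketch(psr):
    sw = solar_wind_block(n_earth=None, ACE_prior=False,
                          include_swgp=True, swgp_basis='powerlaw')
    model = gp_signals.TimingModel() + sw
    return signal_base.PTA([model(psr)])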
##### Utility Functions #########
AU_light_sec = const.AU / const.c # 1 AU in light seconds
AU_pc = const.AU / const.pc # 1 AU in parsecs (for DM normalization)
def _dm_solar_close(n_earth, r_earth):
return (n_earth * AU_light_sec * AU_pc / r_earth)
def _dm_solar(n_earth, theta, r_earth):
return ((np.pi - theta) *
(n_earth * AU_light_sec * AU_pc
/ (r_earth * np.sin(theta))))
def dm_solar(n_earth, theta, r_earth):
"""
Calculates Dispersion measure due to 1/r^2 solar wind density model.
::param :n_earth Solar wind proton/electron density at Earth (1/cm^3)
::param :theta: angle between sun and line-of-sight to pulsar (rad)
::param :r_earth :distance from Earth to Sun in (light seconds).
See You et al. 2007 for more details.
"""
return np.where(np.pi - theta >= 1e-5,
_dm_solar(n_earth, theta, r_earth),
_dm_solar_close(n_earth, r_earth))
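# Illustrative sketch (not in the original module): the DM above feeds the usual
# cold-plasma delay dt = K_DM * DM / f^2 with K_DM ~ 4.148808e3 s MHz^2 pc^-1 cm^3,
# as used in solar_wind(). The argument values here are assumptions for demonstration.
def _example_sw_delay(freq_mhz=1400.0):
    theta_ex = np.array([np.pi / 3])        # Sun-pulsar angle [rad]
    r_earth_ex = np.array([AU_light_sec])   # Earth-Sun distance [light seconds]
    dm_ex = dm_solar(5.0, theta_ex, r_earth_ex)   # DM in pc cm^-3 for n_earth = 5
    return dm_ex * 4.148808e3 / freq_mhz**2       # delay in seconds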
def dm_solar_r_to_p(n_earth, theta, b, z_earth, p):
"""
Calculates Dispersion measure due to 1/r^p solar wind density model.
::param :n_earth Solar wind proton/electron density at Earth (1/cm^3)
::param :theta: angle between sun and line-of-sight to pulsar (rad)
::param :b :impact parameter of the line of sight with respect to the Sun (light seconds).
::param :z_earth :distance from the Earth to the point of closest approach, measured along the line of sight (light seconds).
::param :p :index of the radial power-law density profile, n(r) ~ r^-p.
See You et al. 2007 for more details.
"""
return _dm_solar_r_to_p(n_earth, b, z_earth, p)
def _dm_solar_close_r_to_p(n, z, p):
return n * (AU_light_sec / z)**(p - 1) * (AU_pc / p)
def _dm_solar_r_to_p(n, b, z, p):
return (n * (AU_light_sec / b)**p * b / const.pc * const.c
* (_dm_p_int(b, 1e14, p) - _dm_p_int(b, -z, p)))
def _dm_p_int(b, z, p):
return z / b * spsf.hyp2f1(0.5, p/2., 1.5, -z**2 / b**2)
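# Sanity-check sketch (not in the original module): for p = 2 the Gauss
# hypergeometric form above reduces to the familiar arctan line-of-sight
# integral, since 2F1(1/2, 1, 3/2, -x^2) = arctan(x)/x.
def _dm_p_int_check(b=1.0, z=3.0):
    return _dm_p_int(b, z, 2.0), np.arctan(z / b)   # the two values agree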
def theta_impact(planetssb, sunssb, pos_t):
"""
Use the attributes of an enterprise Pulsar object to calculate the
solar impact angle.
::param :planetssb Solar system barycenter time series supplied with
enterprise.Pulsar objects.
::param :sunssb Solar system sun-to-barycenter timeseries supplied with
enterprise.Pulsar objects.
::param :pos_t Unit vector to pulsar position over time in ecliptic
coordinates. Supplied with enterprise.Pulsar objects.
returns: Solar impact angle (rad), Distance to Earth (R_earth),
impact distance (b), perpendicular distance (z_earth)
"""
earth = planetssb[:, 2, :3]
sun = sunssb[:, :3]
earthsun = earth - sun
R_earth = np.sqrt(np.einsum('ij,ij->i', earthsun, earthsun))
Re_cos_theta_impact = np.einsum('ij,ij->i', earthsun, pos_t)
theta_impact = np.arccos(-Re_cos_theta_impact / R_earth)
b = np.sqrt(R_earth**2 - Re_cos_theta_impact**2)
return theta_impact, R_earth, b, -Re_cos_theta_impact
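# Toy-geometry sketch (not in the original module): with the Earth 1 AU from the
# Sun along +x and the pulsar direction along +y, the solar elongation is 90 deg,
# so theta = pi/2, b = 1 AU and z_earth = 0. Positions are assumed to be in
# light seconds, as supplied by enterprise.Pulsar objects.
def _theta_impact_toy():
    planetssb_toy = np.zeros((1, 3, 6))
    planetssb_toy[0, 2, 0] = AU_light_sec    # Earth (planet index 2) at +x
    sunssb_toy = np.zeros((1, 6))            # Sun at the origin
    pos_t_toy = np.array([[0.0, 1.0, 0.0]])  # pulsar direction along +y
    return theta_impact(planetssb_toy, sunssb_toy, pos_t_toy)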
def sw_mask(psrs, angle_cutoff=None):
"""
Convenience function for masking TOAs lower than a certain solar impact
angle.
param:: :psrs list of enterprise.Pulsar objects
param:: :angle_cutoff (degrees) Mask TOAs within this angle
returns:: dictionary of masks for each pulsar
"""
solar_wind_mask = {}
angle_cutoff = np.deg2rad(angle_cutoff)
for ii, p in enumerate(psrs):
impact_ang, _, _, _ = theta_impact(p.planetssb, p.sunssb, p.pos_t)
solar_wind_mask[p.name] = np.where(impact_ang > angle_cutoff,
True, False)
return solar_wind_mask
# ACE Solar Wind Monitoring data prior for SW electron data.
# Using proton density as a stand in.
def ACE_SWEPAM_Prior(value):
"""Prior function for ACE SWEPAM parameters."""
return ACE_RV.pdf(value)
def ACE_SWEPAM_Sampler(size=None):
"""Sampling function for Uniform parameters."""
return ACE_RV.rvs(size=size)
def ACE_SWEPAM_Parameter(size=None):
"""Class factory for ACE SWEPAM parameters."""
class ACE_SWEPAM_Parameter(parameter.Parameter):
_size = size
_typename = parameter._argrepr('ACE_SWEPAM')
_prior = parameter.Function(ACE_SWEPAM_Prior)
_sampler = staticmethod(ACE_SWEPAM_Sampler)
return ACE_SWEPAM_Parameter
######## Scipy defined RV for ACE SWEPAM proton density data. ########
data_file = defpath + '/ACE_SWEPAM_daily_proton_density_1998_2018_MJD_cm-3.txt'
proton_density = np.loadtxt(data_file)
ne_hist = np.histogram(proton_density[:, 1], bins=100, density=True)
ACE_RV = sps.rv_histogram(ne_hist)
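# Usage sketch (not part of the original module; assumes the ACE data file above
# loaded successfully): the empirical distribution behind ACE_SWEPAM_Parameter
# can be queried directly.
def _ace_prior_sketch():
    draws = ACE_SWEPAM_Sampler(size=5)   # random n_earth draws [cm^-3]
    density = ACE_SWEPAM_Prior(5.0)      # prior density at n_earth = 5 cm^-3
    return draws, density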
| 13,958 | 35.637795 | 81 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/frequentist/optimal_statistic.py | # -*- coding: utf-8 -*-
import warnings
import numpy as np
import scipy.linalg as sl
from enterprise.signals import gp_priors, signal_base, utils
from enterprise_extensions import model_orfs, models
# Define the output to be on a single line.
def warning_on_one_line(message, category, filename, lineno, file=None, line=None):
return '%s:%s: %s: %s\n' % (filename, lineno, category.__name__, message)
# Override default format.
warnings.formatwarning = warning_on_one_line
class OptimalStatistic(object):
"""
Class for the Optimal Statistic as used in the analysis paper.
This class can be used for both standard ML or noise-marginalized OS.
:param psrs: List of `enterprise` Pulsar instances.
:param bayesephem: Include BayesEphem model. Default=True
:param gamma_common:
Fixed common red process spectral index value. By default we
vary the spectral index over the range [0, 7].
:param orf:
String representing which overlap reduction function to use.
By default we do not use any spatial correlations. Permitted
values are ['hd', 'dipole', 'monopole'].
"""
def __init__(self, psrs, bayesephem=True, gamma_common=4.33, orf='hd',
wideband=False, select=None, noisedict=None, pta=None):
# initialize standard model with fixed white noise and
# and powerlaw red and gw signal
if pta is None:
self.pta = models.model_2a(psrs, psd='powerlaw',
bayesephem=bayesephem,
gamma_common=gamma_common,
is_wideband=wideband,
select='backend', noisedict=noisedict)
else:
if np.any(['marginalizing_linear_timing' in sig for sig in pta.signals]):
msg = "Can't run optimal statistic with `enterprise.gp_signals.MarginalizingTimingModel`."
msg += " Try creating PTA with `enterprise.gp_signals.TimingModel`, or if using `enterprise_extensions`"
msg += " set `tm_marg=False`."
raise ValueError(msg)
self.pta = pta
self.gamma_common = gamma_common
# get frequencies here
self.freqs = self._get_freqs(psrs)
# set up cache
self._set_cache_parameters()
# pulsar locations
self.psrlocs = [p.pos for p in psrs]
# overlap reduction function
if orf == 'hd':
self.orf = model_orfs.hd_orf
elif orf == 'dipole':
self.orf = model_orfs.dipole_orf
elif orf == 'monopole':
self.orf = model_orfs.monopole_orf
elif orf == 'gw_monopole':
self.orf = model_orfs.gw_monopole_orf
elif orf == 'gw_dipole':
self.orf = model_orfs.gw_dipole_orf
elif orf == 'st':
self.orf = model_orfs.st_orf
else:
raise ValueError('Unknown ORF!')
def compute_os(self, params=None, psd='powerlaw', fgw=None):
"""
Computes the optimal statistic values given an
`enterprise` parameter dictionary.
:param params: `enterprise` parameter dictionary.
:param psd: choice of cross-power psd [powerlaw,spectrum]
:fgw: frequency of GW spectrum to probe, in Hz [default=None]
:returns:
xi: angular separation [rad] for each pulsar pair
rho: correlation coefficient for each pulsar pair
sig: 1-sigma uncertainty on correlation coefficient for each pulsar pair.
OS: Optimal statistic value (units of A_gw^2)
OS_sig: 1-sigma uncertainty on OS
.. note:: SNR is computed as OS / OS_sig. In the case of a 'spectrum' model
the OS variable will be the PSD(fgw) * Tspan value at the relevant fgw bin.
"""
if params is None:
params = {name: par.sample() for name, par
in zip(self.pta.param_names, self.pta.params)}
else:
# check to see that the params dictionary includes values
# for all of the parameters in the model
for p in self.pta.param_names:
if p not in params.keys():
msg = '{0} is not included '.format(p)
msg += 'in the parameter dictionary. '
msg += 'Drawing a random value.'
warnings.warn(msg)
# get matrix products
TNrs = self.get_TNr(params=params)
TNTs = self.get_TNT(params=params)
FNrs = self.get_FNr(params=params)
FNFs = self.get_FNF(params=params)
FNTs = self.get_FNT(params=params)
phiinvs = self.pta.get_phiinv(params, logdet=False)
X, Z = [], []
for TNr, TNT, FNr, FNF, FNT, phiinv in zip(TNrs, TNTs, FNrs, FNFs, FNTs, phiinvs):
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
try:
cf = sl.cho_factor(Sigma)
SigmaTNr = sl.cho_solve(cf, TNr)
SigmaTNF = sl.cho_solve(cf, FNT.T)
except np.linalg.LinAlgError:
SigmaTNr = np.linalg.solve(Sigma, TNr)
SigmaTNF = np.linalg.solve(Sigma, FNT.T)
FNTSigmaTNr = np.dot(FNT, SigmaTNr)
X.append(FNr - FNTSigmaTNr)
Z.append(FNF - np.dot(FNT, SigmaTNF))
npsr = len(self.pta._signalcollections)
rho, sig, ORF, xi = [], [], [], []
for ii in range(npsr):
for jj in range(ii+1, npsr):
if psd == 'powerlaw':
if self.gamma_common is None and 'gw_gamma' in params.keys():
phiIJ = utils.powerlaw(self.freqs, log10_A=0,
gamma=params['gw_gamma'])
else:
phiIJ = utils.powerlaw(self.freqs, log10_A=0,
gamma=self.gamma_common)
elif psd == 'spectrum':
Sf = -np.inf * np.ones(int(len(self.freqs)/2))
idx = (np.abs(np.unique(self.freqs) - fgw)).argmin()
Sf[idx] = 0.0
phiIJ = gp_priors.free_spectrum(self.freqs,
log10_rho=Sf)
top = np.dot(X[ii], phiIJ * X[jj])
bot = np.trace(np.dot(Z[ii]*phiIJ[None, :], Z[jj]*phiIJ[None, :]))
# cross correlation and uncertainty
rho.append(top / bot)
sig.append(1 / np.sqrt(bot))
# Overlap reduction function for PSRs ii, jj
ORF.append(self.orf(self.psrlocs[ii], self.psrlocs[jj]))
# angular separation
xi.append(np.arccos(np.dot(self.psrlocs[ii], self.psrlocs[jj])))
rho = np.array(rho)
sig = np.array(sig)
ORF = np.array(ORF)
xi = np.array(xi)
OS = (np.sum(rho*ORF / sig ** 2) / np.sum(ORF ** 2 / sig ** 2))
OS_sig = 1 / np.sqrt(np.sum(ORF ** 2 / sig ** 2))
return xi, rho, sig, OS, OS_sig
def compute_noise_marginalized_os(self, chain, param_names=None, N=10000):
"""
Compute noise marginalized OS.
:param chain: MCMC chain from Bayesian run.
:param param_names: list of parameter names for the chain file
:param N: number of iterations to run.
:returns: (os, snr) array of OS and SNR values for each iteration.
"""
# check that the chain file has the same number of parameters as the model
if chain.shape[1] - 4 != len(self.pta.param_names):
msg = 'MCMC chain does not have the same number of parameters '
msg += 'as the model.'
warnings.warn(msg)
opt, sig = np.zeros(N), np.zeros(N)
rho, rho_sig = [], []
setpars = {}
for ii in range(N):
idx = np.random.randint(0, chain.shape[0])
# if param_names is not specified, the parameter dictionary
# is made by mapping the values from the chain to the
# parameters in the pta object
if param_names is None:
setpars.update(self.pta.map_params(chain[idx, :-4]))
else:
setpars = dict(zip(param_names, chain[idx, :-4]))
xi, rho_tmp, rho_sig_tmp, opt[ii], sig[ii] = self.compute_os(params=setpars)
rho.append(rho_tmp)
rho_sig.append(rho_sig_tmp)
return (np.array(xi), np.array(rho), np.array(rho_sig), opt, opt/sig)
def compute_noise_maximized_os(self, chain, param_names=None):
"""
Compute noise maximized OS.
:param chain: MCMC chain from Bayesian run.
:returns:
xi: angular separation [rad] for each pulsar pair
rho: correlation coefficient for each pulsar pair
sig: 1-sigma uncertainty on correlation coefficient for each pulsar pair.
OS: Optimal statistic value (units of A_gw^2)
SNR: OS / OS_sig
"""
# check that the chain file has the same number of parameters as the model
if chain.shape[1] - 4 != len(self.pta.param_names):
msg = 'MCMC chain does not have the same number of parameters '
msg += 'as the model.'
warnings.warn(msg)
idx = np.argmax(chain[:, -4])
# if param_names is not specified, the parameter dictionary
# is made by mapping the values from the chain to the
# parameters in the pta object
if param_names is None:
setpars = (self.pta.map_params(chain[idx, :-4]))
else:
setpars = dict(zip(param_names, chain[idx, :-4]))
xi, rho, sig, Opt, Sig = self.compute_os(params=setpars)
return (xi, rho, sig, Opt, Opt/Sig)
def compute_multiple_corr_os(self, params=None, psd='powerlaw', fgw=None,
correlations=['monopole', 'dipole', 'hd']):
"""
Fits the correlations to multiple spatial correlation functions
:param params: `enterprise` parameter dictionary.
:param psd: choice of cross-power psd [powerlaw,spectrum]
:param fgw: frequency of GW spectrum to probe, in Hz [default=None]
:param correlations: list of correlation functions
:returns:
xi: angular separation [rad] for each pulsar pair
rho: correlation coefficient for each pulsar pair
sig: 1-sigma uncertainty on correlation coefficient for each pulsar pair.
A: An array of correlation amplitudes
OS_sig: An array of 1-sigma uncertainties on the correlation amplitudes
"""
xi, rho, sig, _, _ = self.compute_os(params=params, psd='powerlaw', fgw=None)
# construct a list of all the ORFs to be fit simultaneously
ORFs = []
for corr in correlations:
if corr == 'hd':
orf_func = model_orfs.hd_orf
elif corr == 'dipole':
orf_func = model_orfs.dipole_orf
elif corr == 'monopole':
orf_func = model_orfs.monopole_orf
elif corr == 'gw_monopole':
orf_func = model_orfs.gw_monopole_orf
elif corr == 'gw_dipole':
orf_func = model_orfs.gw_dipole_orf
elif corr == 'st':
orf_func = model_orfs.st_orf
else:
raise ValueError('Unknown ORF!')
ORF = []
npsr = len(self.pta._signalcollections)
for ii in range(npsr):
for jj in range(ii+1, npsr):
ORF.append(orf_func(self.psrlocs[ii], self.psrlocs[jj]))
ORFs.append(np.array(ORF))
Bmat = np.array([[np.sum(ORFs[i]*ORFs[j]/sig**2) for i in range(len(ORFs))]
for j in range(len(ORFs))])
Bmatinv = np.linalg.inv(Bmat)
Cmat = np.array([np.sum(rho*ORFs[i]/sig**2) for i in range(len(ORFs))])
A = np.dot(Bmatinv, Cmat)
A_err = np.array([np.sqrt(Bmatinv[i, i]) for i in range(len(ORFs))])
return xi, rho, sig, A, A_err
def compute_noise_marginalized_multiple_corr_os(self, chain, param_names=None, N=10000,
correlations=['monopole', 'dipole', 'hd']):
"""
Noise-marginalized fitting of the correlations to multiple spatial
correlation functions
:param correlations: list of correlation functions
:param chain: MCMC chain from Bayesian run.
:param param_names: list of parameter names for the chain file
:param N: number of iterations to run.
:returns:
xi: angular separation [rad] for each pulsar pair
rho: correlation coefficient for each pulsar pair and for each noise realization
sig: 1-sigma uncertainty on correlation coefficient for each pulsar pair and for each noise realization
A: An array of correlation amplitudes for each noise realization
OS_sig: An array of 1-sigma uncertainties on the correlation amplitudes for each noise realization
"""
# check that the chain file has the same number of parameters as the model
if chain.shape[1] - 4 != len(self.pta.param_names):
msg = 'MCMC chain does not have the same number of parameters '
msg += 'as the model.'
warnings.warn(msg)
rho, sig, A, A_err = [], [], [], []
setpars = {}
for ii in range(N):
idx = np.random.randint(0, chain.shape[0])
# if param_names is not specified, the parameter dictionary
# is made by mapping the values from the chain to the
# parameters in the pta object
if param_names is None:
setpars.update(self.pta.map_params(chain[idx, :-4]))
else:
setpars = dict(zip(param_names, chain[idx, :-4]))
xi, rho_tmp, sig_tmp, A_tmp, A_err_tmp = self.compute_multiple_corr_os(params=setpars,
correlations=correlations)
rho.append(rho_tmp)
sig.append(sig_tmp)
A.append(A_tmp)
A_err.append(A_err_tmp)
return np.array(xi), np.array(rho), np.array(sig), np.array(A), np.array(A_err)
@signal_base.cache_call(['basis_params'])
def get_Fmats(self, params={}):
"""Kind of a hack to get F-matrices"""
Fmats = []
for sc in self.pta._signalcollections:
ind = []
for signal, idx in sc._idx.items():
if 'red noise' in signal.signal_name and signal.signal_id in ['gw', 'gw_crn']:
ind.append(idx)
ix = np.unique(np.concatenate(ind))
Fmats.append(sc.get_basis(params=params)[:, ix])
return Fmats
def _get_freqs(self, psrs):
"""Hackish way to get frequency vector."""
for sig in self.pta._signalcollections[0]._signals:
if 'red noise' in sig.signal_name and sig.signal_id in ['gw', 'gw_crn']:
# make sure the basis is created
_ = sig.get_basis()
if isinstance(sig._labels, np.ndarray):
return sig._labels
else:
return sig._labels['']
raise ValueError("No frequency basis in pulsar models")
def _set_cache_parameters(self):
""" Set cache parameters for efficiency. """
self.white_params = list(set(par for sc in self.pta._signalcollections
for par in sc.white_params))
self.basis_params = list(set(par for sc in self.pta._signalcollections
for par in sc.basis_params))
self.delay_params = list(set(par for sc in self.pta._signalcollections
for par in sc.delay_params))
def get_TNr(self, params={}):
return self.pta.get_TNr(params=params)
@signal_base.cache_call(['white_params', 'delay_params', 'basis_params'])
def get_FNr(self, params={}):
FNrs = []
for ct, sc in enumerate(self.pta._signalcollections):
N = sc.get_ndiag(params=params)
F = self.get_Fmats(params)[ct]
res = sc.get_detres(params=params)
FNrs.append(N.solve(res, left_array=F))
return FNrs
@signal_base.cache_call(['white_params', 'basis_params'])
def get_FNF(self, params={}):
FNFs = []
for ct, sc in enumerate(self.pta._signalcollections):
N = sc.get_ndiag(params=params)
F = self.get_Fmats(params)[ct]
FNFs.append(N.solve(F, left_array=F))
return FNFs
def get_TNT(self, params={}):
return self.pta.get_TNT(params=params)
@signal_base.cache_call(['white_params', 'basis_params'])
def get_FNT(self, params={}):
FNTs = []
for ct, sc in enumerate(self.pta._signalcollections):
N = sc.get_ndiag(params=params)
F = self.get_Fmats(params)[ct]
T = sc.get_basis(params=params)
FNTs.append(N.solve(T, left_array=F))
return FNTs
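# Sketch (not part of the original class): the final optimal-statistic
# combination used in compute_os, written out as a standalone helper. `rho`,
# `sig` and `Gamma` are assumed per-pulsar-pair arrays of cross-correlations,
# their 1-sigma uncertainties, and ORF values.
def _combine_os_sketch(rho, sig, Gamma):
    weights = Gamma / sig**2
    os_val = np.sum(rho * weights) / np.sum(Gamma * weights)   # A_gw^2 estimate
    os_sig = 1.0 / np.sqrt(np.sum(Gamma * weights))            # 1-sigma uncertainty
    return os_val, os_sig, os_val / os_sig                     # last entry is the S/N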
| 17,367 | 38.205418 | 120 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/frequentist/Fe_statistic.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.linalg as sl
from enterprise.signals import (gp_signals, parameter, signal_base, utils,
white_signals)
class FeStat(object):
"""
Class for the Fe-statistic.
:param psrs: List of `enterprise` Pulsar instances.
:param params: Dictionary of noise parameters.
"""
def __init__(self, psrs, params=None):
print('Initializing the model...')
efac = parameter.Constant()
equad = parameter.Constant()
ef = white_signals.MeasurementNoise(efac=efac)
eq = white_signals.EquadNoise(log10_equad=equad)
tm = gp_signals.TimingModel(use_svd=True)
s = eq + ef + tm
model = []
for p in psrs:
model.append(s(p))
self.pta = signal_base.PTA(model)
# set white noise parameters
if params is None:
print('No noise dictionary provided!...')
else:
self.pta.set_default_params(params)
self.psrs = psrs
self.params = params
self.Nmats = None
def get_Nmats(self):
'''Makes the Nmatrix used in the fstatistic'''
TNTs = self.pta.get_TNT(self.params)
phiinvs = self.pta.get_phiinv(self.params, logdet=False, method='partition')
# Get noise parameters for pta toaerr**2
Nvecs = self.pta.get_ndiag(self.params)
# Get the basis matrix
Ts = self.pta.get_basis(self.params)
Nmats = [make_Nmat(phiinv, TNT, Nvec, T) for phiinv, TNT, Nvec, T in zip(phiinvs, TNTs, Nvecs, Ts)]
return Nmats
def compute_Fe(self, f0, gw_skyloc, brave=False, maximized_parameters=False):
"""
Computes the Fe-statistic (see Ellis, Siemens, Creighton 2012).
:param f0: GW frequency
:param gw_skyloc: 2x{number of sky locations} array containing [theta, phi] for each queried sky location,
where theta=pi/2-DEC, phi=RA,
for a single sky location use gw_skyloc = np.array([[theta,],[phi,]])
:param brave: Skip sanity checks in linalg for speedup if True.
:param maximized_parameters: Calculate maximized extrinsic parameters if True.
:returns:
fstat: value of the Fe-statistic
:if maximized_parameters=True also returns:
inc_max: Maximized value of inclination
psi_max: Maximized value of polarization angle
phase0_max: Maximized value of initial phase
h_max: Maximized value of amplitude
"""
tref=53000*86400
phiinvs = self.pta.get_phiinv(self.params, logdet=False)
TNTs = self.pta.get_TNT(self.params)
Ts = self.pta.get_basis()
if self.Nmats is None:
self.Nmats = self.get_Nmats()
n_psr = len(self.psrs)
N = np.zeros((n_psr, 4))
M = np.zeros((n_psr, 4, 4))
for idx, (psr, Nmat, TNT, phiinv, T) in enumerate(zip(self.psrs, self.Nmats,
TNTs, phiinvs, Ts)):
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
ntoa = len(psr.toas)
A = np.zeros((4, ntoa))
A[0, :] = 1 / f0 ** (1 / 3) * np.sin(2 * np.pi * f0 * (psr.toas-tref))
A[1, :] = 1 / f0 ** (1 / 3) * np.cos(2 * np.pi * f0 * (psr.toas-tref))
A[2, :] = 1 / f0 ** (1 / 3) * np.sin(2 * np.pi * f0 * (psr.toas-tref))
A[3, :] = 1 / f0 ** (1 / 3) * np.cos(2 * np.pi * f0 * (psr.toas-tref))
ip1 = innerProduct_rr(A[0, :], psr.residuals, Nmat, T, Sigma, brave=brave)
ip2 = innerProduct_rr(A[1, :], psr.residuals, Nmat, T, Sigma, brave=brave)
ip3 = innerProduct_rr(A[2, :], psr.residuals, Nmat, T, Sigma, brave=brave)
ip4 = innerProduct_rr(A[3, :], psr.residuals, Nmat, T, Sigma, brave=brave)
N[idx, :] = np.array([ip1, ip2, ip3, ip4])
# define M matrix M_ij=(A_i|A_j)
for jj in range(4):
for kk in range(4):
M[idx, jj, kk] = innerProduct_rr(A[jj, :], A[kk, :], Nmat, T, Sigma, brave=brave)
fstat = np.zeros(gw_skyloc.shape[1])
if maximized_parameters:
inc_max = np.zeros(gw_skyloc.shape[1])
psi_max = np.zeros(gw_skyloc.shape[1])
phase0_max = np.zeros(gw_skyloc.shape[1])
h_max = np.zeros(gw_skyloc.shape[1])
for j, gw_pos in enumerate(gw_skyloc.T):
NN = np.copy(N)
MM = np.copy(M)
for idx, psr in enumerate(self.psrs):
F_p, F_c, _ = utils.create_gw_antenna_pattern(psr.pos, gw_pos[0], gw_pos[1])
NN[idx, :] *= np.array([F_p, F_p, F_c, F_c])
MM[idx, :, :] *= np.array([[F_p**2, F_p**2, F_p*F_c, F_p*F_c],
[F_p**2, F_p**2, F_p*F_c, F_p*F_c],
[F_p*F_c, F_p*F_c, F_c**2, F_c**2],
[F_p*F_c, F_p*F_c, F_c**2, F_c**2]])
N_sum = np.sum(NN, axis=0)
M_sum = np.sum(MM, axis=0)
# take inverse of M
Minv = np.linalg.pinv(M_sum)
fstat[j] = 0.5 * np.dot(N_sum, np.dot(Minv, N_sum))
if maximized_parameters:
a_hat = np.dot(Minv, N_sum)
A_p = (np.sqrt((a_hat[0]+a_hat[3])**2 + (a_hat[1]-a_hat[2])**2) +
np.sqrt((a_hat[0]-a_hat[3])**2 + (a_hat[1]+a_hat[2])**2))
A_c = (np.sqrt((a_hat[0]+a_hat[3])**2 + (a_hat[1]-a_hat[2])**2) -
np.sqrt((a_hat[0]-a_hat[3])**2 + (a_hat[1]+a_hat[2])**2))
AA = A_p + np.sqrt(A_p**2 - A_c**2)
# AA = A_p + np.sqrt(A_p**2 + A_c**2)
# inc_max[j] = np.arccos(-A_c/AA)
inc_max[j] = np.arccos(A_c/AA)
two_psi_max = np.arctan2((A_p*a_hat[3] - A_c*a_hat[0]),
(A_c*a_hat[2] + A_p*a_hat[1]))
psi_max[j]=0.5*np.arctan2(np.sin(two_psi_max),
-np.cos(two_psi_max))
# convert from [-pi, pi] convention to [0,2*pi] convention
if psi_max[j]<0:
psi_max[j]+=np.pi
# correcting weird problem of degeneracy (psi-->pi-psi/2 and phi0-->2pi-phi0 keep everything the same)
if psi_max[j]>np.pi/2:
psi_max[j]+= -np.pi/2
half_phase0 = -0.5*np.arctan2(A_p*a_hat[3] - A_c*a_hat[0],
A_c*a_hat[1] + A_p*a_hat[2])
phase0_max[j] = np.arctan2(-np.sin(2*half_phase0),
np.cos(2*half_phase0))
# convert from [-pi, pi] convention to [0,2*pi] convention
if phase0_max[j]<0:
phase0_max[j]+=2*np.pi
zeta = np.abs(AA)/4 # related to amplitude, zeta=M_chirp^(5/3)/D
h_max[j] = zeta * 2 * (np.pi*f0)**(2/3)*np.pi**(1/3)
if maximized_parameters:
return fstat, inc_max, psi_max, phase0_max, h_max
else:
return fstat
def innerProduct_rr(x, y, Nmat, Tmat, Sigma, TNx=None, TNy=None, brave=False):
r"""
Compute inner product using rank-reduced
approximations for red noise/jitter
Compute: x^T N^{-1} y - x^T N^{-1} T \Sigma^{-1} T^T N^{-1} y
:param x: vector timeseries 1
:param y: vector timeseries 2
:param Nmat: white noise matrix
:param Tmat: Modified design matrix including red noise/jitter
:param Sigma: Sigma matrix (\varphi^{-1} + T^T N^{-1} T)
:param TNx: T^T N^{-1} x precomputed
:param TNy: T^T N^{-1} y precomputed
:return: inner product (x|y)
"""
# white noise term
Ni = Nmat
xNy = np.dot(np.dot(x, Ni), y)
Nx, Ny = np.dot(Ni, x), np.dot(Ni, y)
if TNx is None and TNy is None:
TNx = np.dot(Tmat.T, Nx)
TNy = np.dot(Tmat.T, Ny)
if brave:
cf = sl.cho_factor(Sigma, check_finite=False)
SigmaTNy = sl.cho_solve(cf, TNy, check_finite=False)
else:
cf = sl.cho_factor(Sigma)
SigmaTNy = sl.cho_solve(cf, TNy)
ret = xNy - np.dot(TNx, SigmaTNy)
return ret
def make_Nmat(phiinv, TNT, Nvec, T):
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
cf = sl.cho_factor(Sigma)
# Nshape = np.shape(T)[0] # Not currently used in code
TtN = np.multiply((1/Nvec)[:, None], T).T
# Put pulsar's autoerrors in a diagonal matrix
Ndiag = np.diag(1/Nvec)
expval2 = sl.cho_solve(cf, TtN)
# TtNt = np.transpose(TtN) # Not currently used in code
# An Ntoa-by-Ntoa noise matrix to be used in the expanded dense-matrix calculations above
return Ndiag - np.dot(TtN.T, expval2)
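# Cross-check sketch (not in the original module): for small dense matrices,
# passing N^{-1} as `Nmat` makes innerProduct_rr reproduce x^T C^{-1} y with
# C = N + T phi T^T, via the Woodbury identity. All inputs below are synthetic.
def _innerproduct_crosscheck(seed=0):
    rng = np.random.default_rng(seed)
    ntoa, nbasis = 20, 4
    Nvec = 0.1 + rng.random(ntoa)                  # white-noise variances
    T = rng.standard_normal((ntoa, nbasis))
    phi = np.diag(0.5 + rng.random(nbasis))
    x, y = rng.standard_normal(ntoa), rng.standard_normal(ntoa)
    Nmat = np.diag(1.0 / Nvec)                     # N^{-1}
    Sigma = np.linalg.inv(phi) + T.T @ Nmat @ T    # phi^{-1} + T^T N^{-1} T
    direct = x @ np.linalg.inv(np.diag(Nvec) + T @ phi @ T.T) @ y
    reduced = innerProduct_rr(x, y, Nmat, T, Sigma)
    return direct, reduced                          # the two agree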
| 8,981 | 35.661224 | 118 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/frequentist/F_statistic.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.special
from enterprise.signals import deterministic_signals, gp_signals, signal_base
from enterprise_extensions import blocks, deterministic
def get_xCy(Nvec, T, sigmainv, x, y):
"""Get x^T C^{-1} y"""
TNx = Nvec.solve(x, left_array=T)
TNy = Nvec.solve(y, left_array=T)
xNy = Nvec.solve(y, left_array=x)
return xNy - TNx @ sigmainv @ TNy
def get_TCy(Nvec, T, y, sigmainv, TNT):
"""Get T^T C^{-1} y"""
TNy = Nvec.solve(y, left_array=T)
return TNy - TNT @ sigmainv @ TNy
def innerprod(Nvec, T, sigmainv, TNT, x, y):
"""Get the inner product between x and y"""
xCy = get_xCy(Nvec, T, sigmainv, x, y)
TCy = get_TCy(Nvec, T, y, sigmainv, TNT)
TCx = get_TCy(Nvec, T, x, sigmainv, TNT)
return xCy - TCx.T @ sigmainv @ TCy
class FpStat(object):
"""
Class for the Fp-statistic.
:param psrs: List of `enterprise` Pulsar instances.
:param noisedict: Dictionary of white noise parameter values. Default=None
:param psrTerm: Include the pulsar term in the CW signal model. Default=True
:param bayesephem: Include BayesEphem model. Default=True
"""
def __init__(self, psrs, noisedict=None,
psrTerm=True, bayesephem=True, pta=None, tnequad=False):
if pta is None:
# initialize standard model with fixed white noise
# and powerlaw red noise
# uses the implementation of ECORR in gp_signals
print('Initializing the model...')
tmin = np.min([p.toas.min() for p in psrs])
tmax = np.max([p.toas.max() for p in psrs])
Tspan = tmax - tmin
s = gp_signals.TimingModel(use_svd=True)
s += deterministic.cw_block_circ(amp_prior='log-uniform',
psrTerm=psrTerm, tref=tmin, name='cw')
s += blocks.red_noise_block(prior='log-uniform', psd='powerlaw',
Tspan=Tspan, components=30)
if bayesephem:
s += deterministic_signals.PhysicalEphemerisSignal(use_epoch_toas=True)
# adding white-noise, and acting on psr objects
models = []
for p in psrs:
if 'NANOGrav' in p.flags['pta']:
s2 = s + blocks.white_noise_block(vary=False, inc_ecorr=True,
gp_ecorr=True, tnequad=tnequad)
models.append(s2(p))
else:
s3 = s + blocks.white_noise_block(vary=False, inc_ecorr=False, tnequad=tnequad)
models.append(s3(p))
pta = signal_base.PTA(models)
# set white noise parameters
if noisedict is None:
print('No noise dictionary provided!')
else:
pta.set_default_params(noisedict)
self.pta = pta
else:
# user can specify their own pta object
# if ECORR is included, use the implementation in gp_signals
self.pta = pta
self.psrs = psrs
self.noisedict = noisedict
# precompute important bits:
self.phiinvs = self.pta.get_phiinv(noisedict)
self.TNTs = self.pta.get_TNT(noisedict)
self.Nvecs = self.pta.get_ndiag(noisedict)
self.Ts = self.pta.get_basis(noisedict)
# self.cf_TNT = [sl.cho_factor(TNT + np.diag(phiinv)) for TNT, phiinv in zip(self.TNTs, self.phiinvs)]
self.sigmainvs = [np.linalg.pinv(TNT + np.diag(phiinv)) for TNT, phiinv in zip(self.TNTs, self.phiinvs)]
def compute_Fp(self, fgw):
"""
Computes the Fp-statistic.
:param fgw: GW frequency
:returns:
fstat: value of the Fp-statistic at the given frequency
"""
N = np.zeros(2)
M = np.zeros((2, 2))
fstat = 0
for psr, Nvec, TNT, T, sigmainv in zip(self.psrs, self.Nvecs, self.TNTs, self.Ts, self.sigmainvs):
ntoa = len(psr.toas)
A = np.zeros((2, ntoa))
A[0, :] = 1 / fgw ** (1 / 3) * np.sin(2 * np.pi * fgw * psr.toas)
A[1, :] = 1 / fgw ** (1 / 3) * np.cos(2 * np.pi * fgw * psr.toas)
ip1 = innerprod(Nvec, T, sigmainv, TNT, A[0, :], psr.residuals)
# logger.info(ip1)
ip2 = innerprod(Nvec, T, sigmainv, TNT, A[1, :], psr.residuals)
# logger.info(ip2)
N = np.array([ip1, ip2])
# define M matrix M_ij=(A_i|A_j)
for jj in range(2):
for kk in range(2):
M[jj, kk] = innerprod(Nvec, T, sigmainv, TNT, A[jj, :], A[kk, :])
# take inverse of M
Minv = np.linalg.pinv(M)
fstat += 0.5 * np.dot(N, np.dot(Minv, N))
return fstat
def compute_fap(self, fgw):
"""
Compute false alarm rate for Fp-Statistic. We calculate
the log of the FAP and then exponentiate it in order
to avoid numerical precision problems
:param fgw: GW frequency
:returns: False alarm probability as defined in Eq (64)
of Ellis, Siemens, Creighton (2012)
"""
fp0 = self.compute_Fp(fgw)
N = len(self.psrs)
n = np.arange(0, N)
return np.sum(np.exp(n*np.log(fp0)-fp0-np.log(scipy.special.gamma(n+1))))
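# Cross-check sketch (not in the original module): the false-alarm probability
# above equals the upper tail of a chi^2 distribution with 2*N_psr degrees of
# freedom evaluated at 2*Fp0. The values fp0 = 20 and n_psr = 10 are assumptions.
def _fap_crosscheck(fp0=20.0, n_psr=10):
    from scipy.stats import chi2
    n = np.arange(0, n_psr)
    direct = np.sum(np.exp(n * np.log(fp0) - fp0
                           - np.log(scipy.special.gamma(n + 1))))
    return direct, chi2.sf(2.0 * fp0, df=2 * n_psr)   # the two agree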
| 5,418 | 33.297468 | 112 | py |
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/frequentist/__init__.py | 0 | 0 | 0 | py |
|
enterprise_extensions | enterprise_extensions-master/enterprise_extensions/frequentist/chi_squared.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.linalg as sl
def get_chi2(pta, xs):
"""Compute generalize chisq for pta:
chisq = y^T (N + F phi F^T)^-1 y
= y^T N^-1 y - y^T N^-1 F (F^T N^-1 F + phi^-1)^-1 F^T N^-1 y
"""
params = xs if isinstance(xs, dict) else pta.map_params(xs)
# chisq = y^T (N + F phi F^T)^-1 y
# = y^T N^-1 y - y^T N^-1 F (F^T N^-1 F + phi^-1)^-1 F^T N^-1 y
TNrs = pta.get_TNr(params)
TNTs = pta.get_TNT(params)
phiinvs = pta.get_phiinv(params, logdet=True, method='cliques')
chi2 = np.sum(ell[0] for ell in pta.get_rNr_logdet(params))
if pta._commonsignals:
raise NotImplementedError("get_chi2 does not support correlated signals")
else:
for TNr, TNT, pl in zip(TNrs, TNTs, phiinvs):
if TNr is None:
continue
phiinv, _ = pl
Sigma = TNT + (np.diag(phiinv) if phiinv.ndim == 1 else phiinv)
try:
cf = sl.cho_factor(Sigma)
expval = sl.cho_solve(cf, TNr)
except sl.LinAlgError: # pragma: no cover
return -np.inf
chi2 = chi2 - np.dot(TNr, expval)
return chi2
def get_reduced_chi2(pta, xs):
"""
Compute Generalized Reduced Chi Square for PTA using degrees of freedom
(DOF), defined by dof= NTOAs - N Timing Parameters - N Model Params.
"""
keys = [ky for ky in pta._signal_dict.keys() if 'timing_model' in ky]
chi2 = get_chi2(pta, xs)
degs = np.array([pta._signal_dict[ky].get_basis().shape for ky in keys])
dof = np.sum(degs[:, 0]) - np.sum(degs[:, 1])
dof -= len(pta.param_names)
return chi2/dof
| 1,704 | 29.446429 | 81 | py |
Graph-Unlearning | Graph-Unlearning-main/main.py | import logging
import os
import torch
from exp.exp_graph_partition import ExpGraphPartition
from exp.exp_node_edge_unlearning import ExpNodeEdgeUnlearning
from exp.exp_unlearning import ExpUnlearning
from exp.exp_attack_unlearning import ExpAttackUnlearning
from parameter_parser import parameter_parser
def config_logger(save_name):
# create logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s:%(asctime)s: - %(name)s - : %(message)s')
# create console handler
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
def main(args, exp):
# config the logger
logger_name = "_".join((exp, args['dataset_name'], args['partition_method'], str(args['num_shards']), str(args['test_ratio'])))
config_logger(logger_name)
logging.info(logger_name)
torch.set_num_threads(args["num_threads"])
torch.cuda.set_device(args["cuda"])
os.environ["CUDA_VISIBLE_DEVICES"] = str(args["cuda"])
# subroutine entry for different methods
if exp == 'partition':
ExpGraphPartition(args)
elif exp == 'unlearning':
ExpUnlearning(args)
elif exp == 'node_edge_unlearning':
ExpNodeEdgeUnlearning(args)
elif exp == 'attack_unlearning':
ExpAttackUnlearning(args)
else:
raise Exception('unsupported attack')
if __name__ == "__main__":
args = parameter_parser()
main(args, args['exp'])
| 1,499 | 27.846154 | 131 | py |
Graph-Unlearning | Graph-Unlearning-main/parameter_parser.py | import argparse
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def parameter_parser():
"""
A method to parse up command line parameters.
The default hyper-parameters give a good quality representation without grid search.
"""
parser = argparse.ArgumentParser()
######################### general parameters ################################
parser.add_argument('--is_vary', type=bool, default=False, help='control whether to use multiprocess')
parser.add_argument('--dataset_name', type=str, default='citeseer',
choices=["cora", "citeseer", "pubmed", "Coauthor_CS", "Coauthor_Phys"])
parser.add_argument('--exp', type=str, default='attack_unlearning',
choices=["partition", "unlearning", "node_edge_unlearning", "attack_unlearning"])
parser.add_argument('--cuda', type=int, default=3, help='specify gpu')
parser.add_argument('--num_threads', type=int, default=1)
parser.add_argument('--is_upload', type=str2bool, default=True)
parser.add_argument('--database_name', type=str, default="unlearning_dependant",
choices=['unlearning_dependant', 'unlearning_adaptive',
'unlearning_graph_structure', 'gnn_unlearning_shards',
'unlearning_delta_plot', 'gnn_unlearning_utility',
'unlearning_ratio', 'unlearning_partition_baseline',
'unlearning_ratio', 'attack_unlearning'])
########################## graph partition parameters ######################
parser.add_argument('--is_split', type=str2bool, default=True)
parser.add_argument('--test_ratio', type=float, default=0.1)
parser.add_argument('--use_test_neighbors', type=str2bool, default=True)
parser.add_argument('--is_partition', type=str2bool, default=True)
parser.add_argument('--is_prune', type=str2bool, default=False)
parser.add_argument('--num_shards', type=int, default=10)
parser.add_argument('--is_constrained', type=str2bool, default=True)
parser.add_argument('--is_gen_embedding', type=str2bool, default=True)
parser.add_argument('--partition_method', type=str, default='sage_km',
choices=["sage_km", "random", "lpa", "metis", "lpa_base", "sage_km_base"])
parser.add_argument('--terminate_delta', type=int, default=0)
parser.add_argument('--shard_size_delta', type=float, default=0.005)
########################## unlearning parameters ###########################
parser.add_argument('--repartition', type=str2bool, default=False)
########################## training parameters ###########################
parser.add_argument('--is_train_target_model', type=str2bool, default=True)
parser.add_argument('--is_use_node_feature', type=str2bool, default=False)
parser.add_argument('--is_use_batch', type=str2bool, default=True, help="Use batch train GNN models.")
parser.add_argument('--target_model', type=str, default='GAT', choices=["SAGE", "GAT", 'MLP', "GCN", "GIN"])
parser.add_argument('--train_lr', type=float, default=0.01)
parser.add_argument('--train_weight_decay', type=float, default=0)
parser.add_argument('--num_epochs', type=int, default=100)
parser.add_argument('--num_runs', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--test_batch_size', type=int, default=64)
parser.add_argument('--aggregator', type=str, default='mean', choices=['mean', 'majority', 'optimal'])
parser.add_argument('--opt_lr', type=float, default=0.001)
parser.add_argument('--opt_decay', type=float, default=0.0001)
parser.add_argument('--opt_num_epochs', type=int, default=50)
parser.add_argument('--unlearning_request', type=str, default='random', choices=['random', 'adaptive', 'dependant', 'top1', 'last5'])
########################## analysis parameters ###################################
parser.add_argument('--num_unlearned_nodes', type=int, default=1)
parser.add_argument('--ratio_unlearned_nodes', type=float, default=0.005)
parser.add_argument('--num_unlearned_edges', type=int, default=1)
parser.add_argument('--ratio_deleted_edges', type=float, default=0.9)
parser.add_argument('--num_opt_samples', type=int, default=1000)
args = vars(parser.parse_args())
return args
| 4,665 | 53.255814 | 137 | py |
Graph-Unlearning | Graph-Unlearning-main/config.py | RAW_DATA_PATH = 'temp_data/raw_data/'
PROCESSED_DATA_PATH = 'temp_data/processed_data/'
MODEL_PATH = 'temp_data/models/'
ANALYSIS_PATH = 'temp_data/analysis_data/'
# database name
DATABASE_NAME = "unlearning_gnn" | 213 | 29.571429 | 49 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/node_embedding.py | import logging
import config
from lib_gnn_model.graphsage.graphsage import SAGE
from lib_dataset.data_store import DataStore
class NodeEmbedding:
def __init__(self, args, graph, data):
super(NodeEmbedding, self)
self.logger = logging.getLogger(__name__)
self.args = args
self.graph = graph
self.data = data
self.data_store = DataStore(self.args)
def sage_encoder(self):
if self.args['is_gen_embedding']:
self.logger.info("generating node embeddings with GraphSage...")
node_to_embedding = {}
# run sage
self.target_model = SAGE(self.data.num_features, len(self.data.y.unique()), self.data)
# self.target_model.train_model(50)
# load a pretrained GNN model for generating node embeddings
target_model_name = '_'.join((self.args['target_model'], 'random_1',
str(self.args['shard_size_delta']),
str(self.args['ratio_deleted_edges']), '0_0_1'))
target_model_file = config.MODEL_PATH + self.args['dataset_name'] + '/' + target_model_name
self.target_model.load_model(target_model_file)
logits = self.target_model.generate_embeddings().detach().cpu().numpy()
for node in self.graph.nodes:
node_to_embedding[node] = logits[node]
self.data_store.save_embeddings(node_to_embedding)
else:
node_to_embedding = self.data_store.load_embeddings()
return node_to_embedding
| 1,606 | 34.711111 | 103 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/__init__.py | 0 | 0 | 0 | py |
|
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/walker.py | import itertools
import math
import random
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
from tqdm import trange
from .alias import alias_sample, create_alias_table
from .utils import partition_num
class RandomWalker:
def __init__(self, G, p=1, q=1, use_rejection_sampling=0):
"""
:param G:
:param p: Return parameter,controls the likelihood of immediately revisiting a node in the walk.
:param q: In-out parameter,allows the search to differentiate between “inward” and “outward” nodes
:param use_rejection_sampling: Whether to use the rejection sampling strategy in node2vec.
"""
self.G = G
self.p = p
self.q = q
self.use_rejection_sampling = use_rejection_sampling
def deepwalk_walk(self, walk_length, start_node):
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = list(self.G.neighbors(cur))
if len(cur_nbrs) > 0:
walk.append(random.choice(cur_nbrs))
else:
break
return walk
def node2vec_walk(self, walk_length, start_node):
G = self.G
alias_nodes = self.alias_nodes
alias_edges = self.alias_edges
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = list(G.neighbors(cur))
if len(cur_nbrs) > 0:
if len(walk) == 1:
walk.append(
cur_nbrs[alias_sample(alias_nodes[cur][0], alias_nodes[cur][1])])
else:
prev = walk[-2]
edge = (prev, cur)
next_node = cur_nbrs[alias_sample(alias_edges[edge][0],
alias_edges[edge][1])]
walk.append(next_node)
else:
break
return walk
def node2vec_walk2(self, walk_length, start_node):
"""
Reference:
KnightKing: A Fast Distributed Graph Random Walk Engine
http://madsys.cs.tsinghua.edu.cn/publications/SOSP19-yang.pdf
"""
def rejection_sample(inv_p, inv_q, nbrs_num):
upper_bound = max(1.0, max(inv_p, inv_q))
lower_bound = min(1.0, min(inv_p, inv_q))
shatter = 0
second_upper_bound = max(1.0, inv_q)
if (inv_p > second_upper_bound):
shatter = second_upper_bound / nbrs_num
upper_bound = second_upper_bound + shatter
return upper_bound, lower_bound, shatter
G = self.G
alias_nodes = self.alias_nodes
inv_p = 1.0 / self.p
inv_q = 1.0 / self.q
walk = [start_node]
while len(walk) < walk_length:
cur = walk[-1]
cur_nbrs = list(G.neighbors(cur))
if len(cur_nbrs) > 0:
if len(walk) == 1:
walk.append(
cur_nbrs[alias_sample(alias_nodes[cur][0], alias_nodes[cur][1])])
else:
upper_bound, lower_bound, shatter = rejection_sample(
inv_p, inv_q, len(cur_nbrs))
prev = walk[-2]
prev_nbrs = set(G.neighbors(prev))
while True:
prob = random.random() * upper_bound
if (prob + shatter >= upper_bound):
next_node = prev
break
next_node = cur_nbrs[alias_sample(
alias_nodes[cur][0], alias_nodes[cur][1])]
if (prob < lower_bound):
break
if (prob < inv_p and next_node == prev):
break
_prob = 1.0 if next_node in prev_nbrs else inv_q
if (prob < _prob):
break
walk.append(next_node)
else:
break
return walk
def simulate_walks(self, num_walks, walk_length, workers=1, verbose=0):
G = self.G
nodes = list(G.nodes())
results = Parallel(n_jobs=workers, verbose=verbose, )(
delayed(self._simulate_walks)(nodes, num, walk_length) for num in
partition_num(num_walks, workers))
walks = list(itertools.chain(*results))
return walks
def _simulate_walks(self, nodes, num_walks, walk_length,):
walks = []
for _ in range(num_walks):
random.shuffle(nodes)
for v in nodes:
if self.p == 1 and self.q == 1:
walks.append(self.deepwalk_walk(
walk_length=walk_length, start_node=v))
elif self.use_rejection_sampling:
walks.append(self.node2vec_walk2(
walk_length=walk_length, start_node=v))
else:
walks.append(self.node2vec_walk(
walk_length=walk_length, start_node=v))
return walks
def get_alias_edge(self, t, v):
"""
compute unnormalized transition probability between nodes v and its neighbors give the previous visited node t.
:param t:
:param v:
:return:
"""
G = self.G
p = self.p
q = self.q
unnormalized_probs = []
for x in G.neighbors(v):
weight = G[v][x].get('weight', 1.0) # w_vx
if x == t: # d_tx == 0
unnormalized_probs.append(weight/p)
elif G.has_edge(x, t): # d_tx == 1
unnormalized_probs.append(weight)
else: # d_tx > 1
unnormalized_probs.append(weight/q)
norm_const = sum(unnormalized_probs)
normalized_probs = [
float(u_prob)/norm_const for u_prob in unnormalized_probs]
return create_alias_table(normalized_probs)
def preprocess_transition_probs(self):
"""
Preprocessing of transition probabilities for guiding the random walks.
"""
G = self.G
alias_nodes = {}
for node in G.nodes():
unnormalized_probs = [G[node][nbr].get('weight', 1.0)
for nbr in G.neighbors(node)]
norm_const = sum(unnormalized_probs)
normalized_probs = [
float(u_prob)/norm_const for u_prob in unnormalized_probs]
alias_nodes[node] = create_alias_table(normalized_probs)
if not self.use_rejection_sampling:
alias_edges = {}
for edge in G.edges():
alias_edges[edge] = self.get_alias_edge(edge[0], edge[1])
if not G.is_directed():
alias_edges[(edge[1], edge[0])] = self.get_alias_edge(edge[1], edge[0])
self.alias_edges = alias_edges
self.alias_nodes = alias_nodes
return
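# Usage sketch (not part of the original module; assumes networkx is installed):
# p = q = 1 yields plain DeepWalk-style walks, while other p, q values give the
# biased node2vec walks prepared by preprocess_transition_probs above.
def _random_walker_demo():
    import networkx as nx
    G_toy = nx.karate_club_graph()
    walker = RandomWalker(G_toy, p=1, q=1)
    return walker.simulate_walks(num_walks=2, walk_length=10, workers=1, verbose=0)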
class BiasedWalker:
def __init__(self, idx2node, temp_path):
self.idx2node = idx2node
self.idx = list(range(len(self.idx2node)))
self.temp_path = temp_path
pass
def simulate_walks(self, num_walks, walk_length, stay_prob=0.3, workers=1, verbose=0):
layers_adj = pd.read_pickle(self.temp_path+'layers_adj.pkl')
layers_alias = pd.read_pickle(self.temp_path+'layers_alias.pkl')
layers_accept = pd.read_pickle(self.temp_path+'layers_accept.pkl')
gamma = pd.read_pickle(self.temp_path+'gamma.pkl')
walks = []
initialLayer = 0
nodes = self.idx # list(self.g.nodes())
results = Parallel(n_jobs=workers, verbose=verbose, )(
delayed(self._simulate_walks)(nodes, num, walk_length, stay_prob, layers_adj, layers_accept, layers_alias, gamma) for num in
partition_num(num_walks, workers))
walks = list(itertools.chain(*results))
return walks
def _simulate_walks(self, nodes, num_walks, walk_length, stay_prob, layers_adj, layers_accept, layers_alias, gamma):
walks = []
for _ in range(num_walks):
random.shuffle(nodes)
for v in nodes:
walks.append(self._exec_random_walk(layers_adj, layers_accept, layers_alias,
v, walk_length, gamma, stay_prob))
return walks
def _exec_random_walk(self, graphs, layers_accept, layers_alias, v, walk_length, gamma, stay_prob=0.3):
initialLayer = 0
layer = initialLayer
path = []
path.append(self.idx2node[v])
while len(path) < walk_length:
r = random.random()
if(r < stay_prob): # same layer
v = chooseNeighbor(v, graphs, layers_alias,
layers_accept, layer)
path.append(self.idx2node[v])
else: # different layer
r = random.random()
try:
x = math.log(gamma[layer][v] + math.e)
p_moveup = (x / (x + 1))
except:
print(layer, v)
raise ValueError()
if(r > p_moveup):
if(layer > initialLayer):
layer = layer - 1
else:
if((layer + 1) in graphs and v in graphs[layer + 1]):
layer = layer + 1
return path
def chooseNeighbor(v, graphs, layers_alias, layers_accept, layer):
v_list = graphs[layer][v]
idx = alias_sample(layers_accept[layer][v], layers_alias[layer][v])
v = v_list[idx]
return v
| 9,789 | 34.34296 | 136 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/classify.py | from __future__ import print_function
import numpy
from sklearn.metrics import f1_score, accuracy_score
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import MultiLabelBinarizer
class TopKRanker(OneVsRestClassifier):
def predict(self, X, top_k_list):
probs = numpy.asarray(super(TopKRanker, self).predict_proba(X))
all_labels = []
for i, k in enumerate(top_k_list):
probs_ = probs[i, :]
labels = self.classes_[probs_.argsort()[-k:]].tolist()
probs_[:] = 0
probs_[labels] = 1
all_labels.append(probs_)
return numpy.asarray(all_labels)
class Classifier(object):
def __init__(self, embeddings, clf):
self.embeddings = embeddings
self.clf = TopKRanker(clf)
self.binarizer = MultiLabelBinarizer(sparse_output=True)
def train(self, X, Y, Y_all):
self.binarizer.fit(Y_all)
X_train = [self.embeddings[x] for x in X]
Y = self.binarizer.transform(Y)
self.clf.fit(X_train, Y)
def evaluate(self, X, Y):
top_k_list = [len(l) for l in Y]
Y_ = self.predict(X, top_k_list)
Y = self.binarizer.transform(Y)
averages = ["micro", "macro", "samples", "weighted"]
results = {}
for average in averages:
results[average] = f1_score(Y, Y_, average=average)
results['acc'] = accuracy_score(Y,Y_)
print('-------------------')
print(results)
return results
print('-------------------')
def predict(self, X, top_k_list):
X_ = numpy.asarray([self.embeddings[x] for x in X])
Y = self.clf.predict(X_, top_k_list=top_k_list)
return Y
def split_train_evaluate(self, X, Y, train_precent, seed=0):
state = numpy.random.get_state()
training_size = int(train_precent * len(X))
numpy.random.seed(seed)
shuffle_indices = numpy.random.permutation(numpy.arange(len(X)))
X_train = [X[shuffle_indices[i]] for i in range(training_size)]
Y_train = [Y[shuffle_indices[i]] for i in range(training_size)]
X_test = [X[shuffle_indices[i]] for i in range(training_size, len(X))]
Y_test = [Y[shuffle_indices[i]] for i in range(training_size, len(X))]
self.train(X_train, Y_train, Y)
numpy.random.set_state(state)
return self.evaluate(X_test, Y_test)
def read_node_label(filename, skip_head=False):
fin = open(filename, 'r')
X = []
Y = []
while 1:
if skip_head:
fin.readline()
l = fin.readline()
if l == '':
break
vec = l.strip().split(' ')
X.append(vec[0])
Y.append(vec[1:])
fin.close()
return X, Y
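# Usage sketch (not part of the original module): evaluate node embeddings with
# a one-vs-rest logistic regression. `embeddings` is an assumed {node: vector}
# dict and `label_path` a hypothetical "node label ..." file in the format
# expected by read_node_label above.
def _classifier_demo(embeddings, label_path='labels.txt'):
    from sklearn.linear_model import LogisticRegression
    X, Y = read_node_label(label_path)
    clf = Classifier(embeddings=embeddings, clf=LogisticRegression())
    return clf.split_train_evaluate(X, Y, 0.8)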
| 2,772 | 31.244186 | 78 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/alias.py | import numpy as np
def create_alias_table(area_ratio):
"""
:param area_ratio: sum(area_ratio)=1
:return: accept,alias
"""
l = len(area_ratio)
accept, alias = [0] * l, [0] * l
small, large = [], []
area_ratio_ = np.array(area_ratio) * l
for i, prob in enumerate(area_ratio_):
if prob < 1.0:
small.append(i)
else:
large.append(i)
while small and large:
small_idx, large_idx = small.pop(), large.pop()
accept[small_idx] = area_ratio_[small_idx]
alias[small_idx] = large_idx
area_ratio_[large_idx] = area_ratio_[large_idx] - \
(1 - area_ratio_[small_idx])
if area_ratio_[large_idx] < 1.0:
small.append(large_idx)
else:
large.append(large_idx)
while large:
large_idx = large.pop()
accept[large_idx] = 1
while small:
small_idx = small.pop()
accept[small_idx] = 1
return accept, alias
def alias_sample(accept, alias):
"""
:param accept:
:param alias:
:return: sample index
"""
N = len(accept)
i = int(np.random.random()*N)
r = np.random.random()
if r < accept[i]:
return i
else:
return alias[i]
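# Usage sketch (not part of the original module): draw from a discrete
# distribution with the alias method; the empirical frequencies of repeated
# alias_sample calls approach the input probabilities.
def _alias_demo(n_draws=10000):
    probs = [0.5, 0.3, 0.2]
    accept, alias = create_alias_table(probs)
    counts = np.zeros(len(probs))
    for _ in range(n_draws):
        counts[alias_sample(accept, alias)] += 1
    return counts / n_draws   # roughly [0.5, 0.3, 0.2]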
| 1,261 | 21.945455 | 59 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/utils.py | def preprocess_nxgraph(graph):
node2idx = {}
idx2node = []
node_size = 0
for node in graph.nodes():
node2idx[node] = node_size
idx2node.append(node)
node_size += 1
return idx2node, node2idx
def partition_dict(vertices, workers):
batch_size = (len(vertices) - 1) // workers + 1
part_list = []
part = []
count = 0
for v1, nbs in vertices.items():
part.append((v1, nbs))
count += 1
if count % batch_size == 0:
part_list.append(part)
part = []
if len(part) > 0:
part_list.append(part)
return part_list
def partition_list(vertices, workers):
batch_size = (len(vertices) - 1) // workers + 1
part_list = []
part = []
count = 0
for v1, nbs in enumerate(vertices):
part.append((v1, nbs))
count += 1
if count % batch_size == 0:
part_list.append(part)
part = []
if len(part) > 0:
part_list.append(part)
return part_list
def partition_num(num, workers):
if num % workers == 0:
return [num//workers]*workers
else:
return [num//workers]*workers + [num % workers]
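if __name__ == '__main__':
    # Illustrative check (not part of the original module): splitting 10 walk
    # jobs across 3 workers yields [3, 3, 3, 1], i.e. the remainder goes into
    # an extra chunk, and partition_list groups an enumerated list the same way.
    print(partition_num(10, 3))
    print(partition_list(['a', 'b', 'c', 'd', 'e'], 2))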
| 1,191 | 23.326531 | 55 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/__init__.py | from .models import * | 21 | 21 | 21 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/deepwalk.py | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Perozzi B, Al-Rfou R, Skiena S. Deepwalk: Online learning of social representations[C]//Proceedings of the 20th ACM SIGKDD international conference on Knowledge discovery and data mining. ACM, 2014: 701-710.(http://www.perozzi.net/publications/14_kdd_deepwalk.pdf)
"""
from ..walker import RandomWalker
from gensim.models import Word2Vec
import pandas as pd
class DeepWalk:
def __init__(self, graph, walk_length, num_walks, workers=1):
self.graph = graph
self.w2v_model = None
self._embeddings = {}
self.walker = RandomWalker(
graph, p=1, q=1, )
self.sentences = self.walker.simulate_walks(
num_walks=num_walks, walk_length=walk_length, workers=workers, verbose=1)
def train(self, embed_size=128, window_size=5, workers=3, iter=5, **kwargs):
kwargs["sentences"] = self.sentences
kwargs["min_count"] = kwargs.get("min_count", 0)
kwargs["size"] = embed_size
kwargs["sg"] = 1 # skip gram
kwargs["hs"] = 1 # deepwalk use Hierarchical Softmax
kwargs["workers"] = workers
kwargs["window"] = window_size
kwargs["iter"] = iter
print("Learning embedding vectors...")
model = Word2Vec(**kwargs)
print("Learning embedding vectors done!")
self.w2v_model = model
return model
def get_embeddings(self,):
if self.w2v_model is None:
print("model not train")
return {}
self._embeddings = {}
for word in self.graph.nodes():
self._embeddings[word] = self.w2v_model.wv[word]
return self._embeddings
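if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module). It assumes
    # networkx is available and a gensim 3.x release, since train() relies on
    # the old `size`/`iter` keyword names of Word2Vec.
    import networkx as nx
    G = nx.relabel_nodes(nx.karate_club_graph(), str)  # Word2Vec expects string tokens
    model = DeepWalk(G, walk_length=10, num_walks=80, workers=1)
    model.train(embed_size=64, window_size=5, iter=3)
    embeddings = model.get_embeddings()
    print(len(embeddings), "nodes embedded, dimension", len(next(iter(embeddings.values()))))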
| 1,742 | 25.815385 | 272 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/node2vec.py | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Grover A, Leskovec J. node2vec: Scalable feature learning for networks[C]//Proceedings of the 22nd ACM SIGKDD international conference on Knowledge discovery and data mining. ACM, 2016: 855-864.(https://www.kdd.org/kdd2016/papers/files/rfp0218-groverA.pdf)
"""
from gensim.models import Word2Vec
import pandas as pd
from ..walker import RandomWalker
class Node2Vec:
def __init__(self, graph, walk_length, num_walks, p=1.0, q=1.0, workers=1, use_rejection_sampling=0):
self.graph = graph
        self.w2v_model = None
        self._embeddings = {}
self.walker = RandomWalker(
graph, p=p, q=q, use_rejection_sampling=use_rejection_sampling)
print("Preprocess transition probs...")
self.walker.preprocess_transition_probs()
self.sentences = self.walker.simulate_walks(
num_walks=num_walks, walk_length=walk_length, workers=workers, verbose=1)
def train(self, embed_size=128, window_size=5, workers=3, iter=5, **kwargs):
kwargs["sentences"] = self.sentences
kwargs["min_count"] = kwargs.get("min_count", 0)
kwargs["size"] = embed_size
kwargs["sg"] = 1
kwargs["hs"] = 0 # node2vec not use Hierarchical Softmax
kwargs["workers"] = workers
kwargs["window"] = window_size
kwargs["iter"] = iter
print("Learning embedding vectors...")
model = Word2Vec(**kwargs)
print("Learning embedding vectors done!")
self.w2v_model = model
return model
def get_embeddings(self,):
if self.w2v_model is None:
print("model not train")
return {}
self._embeddings = {}
for word in self.graph.nodes():
self._embeddings[word] = self.w2v_model.wv[word]
return self._embeddings
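if __name__ == "__main__":
    # Usage sketch (illustrative, not part of the original module); assumes
    # networkx and a gensim 3.x release (train() uses the old `size`/`iter`
    # keywords). p (return) and q (in-out) bias the walks towards breadth-first
    # or depth-first behaviour as described in the node2vec paper.
    import networkx as nx
    G = nx.relabel_nodes(nx.karate_club_graph(), str)
    model = Node2Vec(G, walk_length=10, num_walks=80, p=0.25, q=4, workers=1)
    model.train(embed_size=64, window_size=5, iter=3)
    embeddings = model.get_embeddings()
    print(len(embeddings), "nodes embedded")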
| 1,883 | 25.535211 | 264 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/sdne.py | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Wang D, Cui P, Zhu W. Structural deep network embedding[C]//Proceedings of the 22nd ACM SIGKDD international conference on Knowledge discovery and data mining. ACM, 2016: 1225-1234.(https://www.kdd.org/kdd2016/papers/files/rfp0191-wangAemb.pdf)
"""
import time
import numpy as np
import scipy.sparse as sp
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.callbacks import History
from tensorflow.python.keras.layers import Dense, Input
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.regularizers import l1_l2
from ..utils import preprocess_nxgraph
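# Notes on the two loss terms defined below (added for clarity, not part of the
# original file): l_2nd is the second-order proximity loss, an autoencoder
# reconstruction error over each adjacency row in which non-zero entries are
# re-weighted by `beta` so observed edges are penalised more heavily than
# absent ones; l_1st is the first-order proximity loss
# alpha * 2 * tr(Y^T L Y) / batch_size, the graph-Laplacian penalty that pulls
# the embeddings of adjacent nodes together.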
def l_2nd(beta):
def loss_2nd(y_true, y_pred):
b_ = np.ones_like(y_true)
b_[y_true != 0] = beta
x = K.square((y_true - y_pred) * b_)
t = K.sum(x, axis=-1, )
return K.mean(t)
return loss_2nd
def l_1st(alpha):
def loss_1st(y_true, y_pred):
L = y_true
Y = y_pred
batch_size = tf.to_float(K.shape(L)[0])
return alpha * 2 * tf.linalg.trace(tf.matmul(tf.matmul(Y, L, transpose_a=True), Y)) / batch_size
return loss_1st
def create_model(node_size, hidden_size=[256, 128], l1=1e-5, l2=1e-4):
A = Input(shape=(node_size,))
L = Input(shape=(None,))
fc = A
for i in range(len(hidden_size)):
if i == len(hidden_size) - 1:
fc = Dense(hidden_size[i], activation='relu',
kernel_regularizer=l1_l2(l1, l2), name='1st')(fc)
else:
fc = Dense(hidden_size[i], activation='relu',
kernel_regularizer=l1_l2(l1, l2))(fc)
Y = fc
for i in reversed(range(len(hidden_size) - 1)):
fc = Dense(hidden_size[i], activation='relu',
kernel_regularizer=l1_l2(l1, l2))(fc)
A_ = Dense(node_size, 'relu', name='2nd')(fc)
model = Model(inputs=[A, L], outputs=[A_, Y])
emb = Model(inputs=A, outputs=Y)
return model, emb
class SDNE(object):
def __init__(self, graph, hidden_size=[32, 16], alpha=1e-6, beta=5., nu1=1e-5, nu2=1e-4, ):
self.graph = graph
# self.g.remove_edges_from(self.g.selfloop_edges())
self.idx2node, self.node2idx = preprocess_nxgraph(self.graph)
self.node_size = self.graph.number_of_nodes()
self.hidden_size = hidden_size
self.alpha = alpha
self.beta = beta
self.nu1 = nu1
self.nu2 = nu2
self.A, self.L = self._create_A_L(
self.graph, self.node2idx) # Adj Matrix,L Matrix
self.reset_model()
self.inputs = [self.A, self.L]
self._embeddings = {}
def reset_model(self, opt='adam'):
self.model, self.emb_model = create_model(self.node_size, hidden_size=self.hidden_size, l1=self.nu1,
l2=self.nu2)
self.model.compile(opt, [l_2nd(self.beta), l_1st(self.alpha)])
self.get_embeddings()
def train(self, batch_size=1024, epochs=1, initial_epoch=0, verbose=1):
if batch_size >= self.node_size:
if batch_size > self.node_size:
print('batch_size({0}) > node_size({1}),set batch_size = {1}'.format(
batch_size, self.node_size))
batch_size = self.node_size
return self.model.fit([self.A.todense(), self.L.todense()], [self.A.todense(), self.L.todense()],
batch_size=batch_size, epochs=epochs, initial_epoch=initial_epoch, verbose=verbose,
shuffle=False, )
else:
steps_per_epoch = (self.node_size - 1) // batch_size + 1
hist = History()
hist.on_train_begin()
logs = {}
for epoch in range(initial_epoch, epochs):
start_time = time.time()
losses = np.zeros(3)
for i in range(steps_per_epoch):
index = np.arange(
i * batch_size, min((i + 1) * batch_size, self.node_size))
A_train = self.A[index, :].todense()
L_mat_train = self.L[index][:, index].todense()
inp = [A_train, L_mat_train]
batch_losses = self.model.train_on_batch(inp, inp)
losses += batch_losses
losses = losses / steps_per_epoch
logs['loss'] = losses[0]
logs['2nd_loss'] = losses[1]
logs['1st_loss'] = losses[2]
epoch_time = int(time.time() - start_time)
hist.on_epoch_end(epoch, logs)
if verbose > 0:
print('Epoch {0}/{1}'.format(epoch + 1, epochs))
print('{0}s - loss: {1: .4f} - 2nd_loss: {2: .4f} - 1st_loss: {3: .4f}'.format(
epoch_time, losses[0], losses[1], losses[2]))
return hist
def evaluate(self, ):
return self.model.evaluate(x=self.inputs, y=self.inputs, batch_size=self.node_size)
def get_embeddings(self):
self._embeddings = {}
embeddings = self.emb_model.predict(self.A.todense(), batch_size=self.node_size)
look_back = self.idx2node
for i, embedding in enumerate(embeddings):
self._embeddings[look_back[i]] = embedding
return self._embeddings
def _create_A_L(self, graph, node2idx):
node_size = graph.number_of_nodes()
A_data = []
A_row_index = []
A_col_index = []
for edge in graph.edges():
v1, v2 = edge
edge_weight = graph[v1][v2].get('weight', 1)
A_data.append(edge_weight)
A_row_index.append(node2idx[v1])
A_col_index.append(node2idx[v2])
A = sp.csr_matrix((A_data, (A_row_index, A_col_index)), shape=(node_size, node_size))
A_ = sp.csr_matrix((A_data + A_data, (A_row_index + A_col_index, A_col_index + A_row_index)),
shape=(node_size, node_size))
D = sp.diags(A_.sum(axis=1).flatten().tolist()[0])
L = D - A_
return A, L
| 6,214 | 34.514286 | 252 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/struc2vec.py | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Ribeiro L F R, Saverese P H P, Figueiredo D R. struc2vec: Learning node representations from structural identity[C]//Proceedings of the 23rd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining. ACM, 2017: 385-394.(https://arxiv.org/pdf/1704.03165.pdf)
"""
import math
import os
import shutil
from collections import ChainMap, deque
import numpy as np
import pandas as pd
from fastdtw import fastdtw
from gensim.models import Word2Vec
from joblib import Parallel, delayed
from tqdm import tqdm
from ..alias import create_alias_table
from ..utils import partition_dict, preprocess_nxgraph
from ..walker import BiasedWalker
class Struc2Vec():
def __init__(self, graph, walk_length=10, num_walks=100, workers=1, verbose=0, stay_prob=0.3, opt1_reduce_len=True, opt2_reduce_sim_calc=True, opt3_num_layers=None, temp_path='./temp_struc2vec/', reuse=False):
self.graph = graph
self.idx2node, self.node2idx = preprocess_nxgraph(graph)
self.idx = list(range(len(self.idx2node)))
self.opt1_reduce_len = opt1_reduce_len
self.opt2_reduce_sim_calc = opt2_reduce_sim_calc
self.opt3_num_layers = opt3_num_layers
        self.reuse = reuse
self.temp_path = temp_path
if not os.path.exists(self.temp_path):
os.mkdir(self.temp_path)
if not reuse:
shutil.rmtree(self.temp_path)
os.mkdir(self.temp_path)
self.create_context_graph(self.opt3_num_layers, workers, verbose)
self.prepare_biased_walk()
self.walker = BiasedWalker(self.idx2node, self.temp_path)
self.sentences = self.walker.simulate_walks(
num_walks, walk_length, stay_prob, workers, verbose)
        self.w2v_model = None
        self._embeddings = {}
def create_context_graph(self, max_num_layers, workers=1, verbose=0,):
pair_distances = self._compute_structural_distance(
max_num_layers, workers, verbose,)
layers_adj, layers_distances = self._get_layer_rep(pair_distances)
pd.to_pickle(layers_adj, self.temp_path + 'layers_adj.pkl')
layers_accept, layers_alias = self._get_transition_probs(
layers_adj, layers_distances)
pd.to_pickle(layers_alias, self.temp_path + 'layers_alias.pkl')
pd.to_pickle(layers_accept, self.temp_path + 'layers_accept.pkl')
def prepare_biased_walk(self,):
sum_weights = {}
sum_edges = {}
average_weight = {}
gamma = {}
layer = 0
while (os.path.exists(self.temp_path+'norm_weights_distance-layer-' + str(layer)+'.pkl')):
probs = pd.read_pickle(
self.temp_path+'norm_weights_distance-layer-' + str(layer)+'.pkl')
for v, list_weights in probs.items():
sum_weights.setdefault(layer, 0)
sum_edges.setdefault(layer, 0)
sum_weights[layer] += sum(list_weights)
sum_edges[layer] += len(list_weights)
average_weight[layer] = sum_weights[layer] / sum_edges[layer]
gamma.setdefault(layer, {})
for v, list_weights in probs.items():
num_neighbours = 0
for w in list_weights:
if (w > average_weight[layer]):
num_neighbours += 1
gamma[layer][v] = num_neighbours
layer += 1
pd.to_pickle(average_weight, self.temp_path + 'average_weight')
pd.to_pickle(gamma, self.temp_path + 'gamma.pkl')
def train(self, embed_size=128, window_size=5, workers=3, iter=5):
# pd.read_pickle(self.temp_path+'walks.pkl')
sentences = self.sentences
print("Learning representation...")
model = Word2Vec(sentences, size=embed_size, window=window_size, min_count=0, hs=1, sg=1, workers=workers,
iter=iter)
print("Learning representation done!")
self.w2v_model = model
return model
def get_embeddings(self,):
if self.w2v_model is None:
print("model not train")
return {}
self._embeddings = {}
for word in self.graph.nodes():
self._embeddings[word] = self.w2v_model.wv[word]
return self._embeddings
def _compute_ordered_degreelist(self, max_num_layers):
degreeList = {}
vertices = self.idx # self.g.nodes()
for v in vertices:
degreeList[v] = self._get_order_degreelist_node(v, max_num_layers)
return degreeList
def _get_order_degreelist_node(self, root, max_num_layers=None):
if max_num_layers is None:
max_num_layers = float('inf')
ordered_degree_sequence_dict = {}
visited = [False] * len(self.graph.nodes())
queue = deque()
level = 0
queue.append(root)
visited[root] = True
while (len(queue) > 0 and level <= max_num_layers):
count = len(queue)
if self.opt1_reduce_len:
degree_list = {}
else:
degree_list = []
while (count > 0):
top = queue.popleft()
node = self.idx2node[top]
degree = len(self.graph[node])
if self.opt1_reduce_len:
degree_list[degree] = degree_list.get(degree, 0) + 1
else:
degree_list.append(degree)
for nei in self.graph[node]:
nei_idx = self.node2idx[nei]
if not visited[nei_idx]:
visited[nei_idx] = True
queue.append(nei_idx)
count -= 1
if self.opt1_reduce_len:
orderd_degree_list = [(degree, freq)
for degree, freq in degree_list.items()]
orderd_degree_list.sort(key=lambda x: x[0])
else:
orderd_degree_list = sorted(degree_list)
ordered_degree_sequence_dict[level] = orderd_degree_list
level += 1
return ordered_degree_sequence_dict
def _compute_structural_distance(self, max_num_layers, workers=1, verbose=0,):
if os.path.exists(self.temp_path+'structural_dist.pkl'):
structural_dist = pd.read_pickle(
self.temp_path+'structural_dist.pkl')
else:
if self.opt1_reduce_len:
dist_func = cost_max
else:
dist_func = cost
if os.path.exists(self.temp_path + 'degreelist.pkl'):
degreeList = pd.read_pickle(self.temp_path + 'degreelist.pkl')
else:
degreeList = self._compute_ordered_degreelist(max_num_layers)
pd.to_pickle(degreeList, self.temp_path + 'degreelist.pkl')
if self.opt2_reduce_sim_calc:
degrees = self._create_vectors()
degreeListsSelected = {}
vertices = {}
n_nodes = len(self.idx)
for v in self.idx: # c:list of vertex
nbs = get_vertices(
v, len(self.graph[self.idx2node[v]]), degrees, n_nodes)
vertices[v] = nbs # store nbs
degreeListsSelected[v] = degreeList[v] # store dist
for n in nbs:
# store dist of nbs
degreeListsSelected[n] = degreeList[n]
else:
vertices = {}
for v in degreeList:
vertices[v] = [vd for vd in degreeList.keys() if vd > v]
results = Parallel(n_jobs=workers, verbose=verbose,)(
delayed(compute_dtw_dist)(part_list, degreeList, dist_func) for part_list in partition_dict(vertices, workers))
dtw_dist = dict(ChainMap(*results))
structural_dist = convert_dtw_struc_dist(dtw_dist)
pd.to_pickle(structural_dist, self.temp_path +
'structural_dist.pkl')
return structural_dist
def _create_vectors(self):
        degrees = {}  # store, for each degree value, the list of vertices having it
degrees_sorted = set() # store degree
G = self.graph
for v in self.idx:
degree = len(G[self.idx2node[v]])
degrees_sorted.add(degree)
if (degree not in degrees):
degrees[degree] = {}
degrees[degree]['vertices'] = []
degrees[degree]['vertices'].append(v)
degrees_sorted = np.array(list(degrees_sorted), dtype='int')
degrees_sorted = np.sort(degrees_sorted)
l = len(degrees_sorted)
for index, degree in enumerate(degrees_sorted):
if (index > 0):
degrees[degree]['before'] = degrees_sorted[index - 1]
if (index < (l - 1)):
degrees[degree]['after'] = degrees_sorted[index + 1]
return degrees
def _get_layer_rep(self, pair_distances):
layer_distances = {}
layer_adj = {}
for v_pair, layer_dist in pair_distances.items():
for layer, distance in layer_dist.items():
vx = v_pair[0]
vy = v_pair[1]
layer_distances.setdefault(layer, {})
layer_distances[layer][vx, vy] = distance
layer_adj.setdefault(layer, {})
layer_adj[layer].setdefault(vx, [])
layer_adj[layer].setdefault(vy, [])
layer_adj[layer][vx].append(vy)
layer_adj[layer][vy].append(vx)
return layer_adj, layer_distances
def _get_transition_probs(self, layers_adj, layers_distances):
layers_alias = {}
layers_accept = {}
for layer in layers_adj:
neighbors = layers_adj[layer]
layer_distances = layers_distances[layer]
node_alias_dict = {}
node_accept_dict = {}
norm_weights = {}
for v, neighbors in neighbors.items():
e_list = []
sum_w = 0.0
for n in neighbors:
if (v, n) in layer_distances:
wd = layer_distances[v, n]
else:
wd = layer_distances[n, v]
w = np.exp(-float(wd))
e_list.append(w)
sum_w += w
e_list = [x / sum_w for x in e_list]
norm_weights[v] = e_list
accept, alias = create_alias_table(e_list)
node_alias_dict[v] = alias
node_accept_dict[v] = accept
pd.to_pickle(
norm_weights, self.temp_path + 'norm_weights_distance-layer-' + str(layer)+'.pkl')
layers_alias[layer] = node_alias_dict
layers_accept[layer] = node_accept_dict
return layers_accept, layers_alias
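# Note (added for clarity, not part of the original file): the functions below
# are the ground distances handed to fastdtw in compute_dtw_dist. `cost`
# compares two plain degrees (used when opt1_reduce_len is False), while
# `cost_min` / `cost_max` compare compressed (degree, count) pairs and scale
# the degree ratio by the smaller / larger count; the 0.5 offset keeps the
# ratio finite for degree-0 entries.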
def cost(a, b):
ep = 0.5
m = max(a, b) + ep
mi = min(a, b) + ep
return ((m / mi) - 1)
def cost_min(a, b):
ep = 0.5
m = max(a[0], b[0]) + ep
mi = min(a[0], b[0]) + ep
return ((m / mi) - 1) * min(a[1], b[1])
def cost_max(a, b):
ep = 0.5
m = max(a[0], b[0]) + ep
mi = min(a[0], b[0]) + ep
return ((m / mi) - 1) * max(a[1], b[1])
def convert_dtw_struc_dist(distances, startLayer=1):
"""
:param distances: dict of dict
:param startLayer:
:return:
"""
for vertices, layers in distances.items():
keys_layers = sorted(layers.keys())
startLayer = min(len(keys_layers), startLayer)
for layer in range(0, startLayer):
keys_layers.pop(0)
for layer in keys_layers:
layers[layer] += layers[layer - 1]
return distances
def get_vertices(v, degree_v, degrees, n_nodes):
a_vertices_selected = 2 * math.log(n_nodes, 2)
vertices = []
try:
c_v = 0
for v2 in degrees[degree_v]['vertices']:
if (v != v2):
vertices.append(v2) # same degree
c_v += 1
if (c_v > a_vertices_selected):
raise StopIteration
if ('before' not in degrees[degree_v]):
degree_b = -1
else:
degree_b = degrees[degree_v]['before']
if ('after' not in degrees[degree_v]):
degree_a = -1
else:
degree_a = degrees[degree_v]['after']
if (degree_b == -1 and degree_a == -1):
raise StopIteration # not anymore v
degree_now = verifyDegrees(degrees, degree_v, degree_a, degree_b)
# nearest valid degree
while True:
for v2 in degrees[degree_now]['vertices']:
if (v != v2):
vertices.append(v2)
c_v += 1
if (c_v > a_vertices_selected):
raise StopIteration
if (degree_now == degree_b):
if ('before' not in degrees[degree_b]):
degree_b = -1
else:
degree_b = degrees[degree_b]['before']
else:
if ('after' not in degrees[degree_a]):
degree_a = -1
else:
degree_a = degrees[degree_a]['after']
if (degree_b == -1 and degree_a == -1):
raise StopIteration
degree_now = verifyDegrees(degrees, degree_v, degree_a, degree_b)
except StopIteration:
return list(vertices)
return list(vertices)
def verifyDegrees(degrees, degree_v_root, degree_a, degree_b):
if(degree_b == -1):
degree_now = degree_a
elif(degree_a == -1):
degree_now = degree_b
elif(abs(degree_b - degree_v_root) < abs(degree_a - degree_v_root)):
degree_now = degree_b
else:
degree_now = degree_a
return degree_now
def compute_dtw_dist(part_list, degreeList, dist_func):
dtw_dist = {}
for v1, nbs in part_list:
        lists_v1 = degreeList[v1]  # lists_v1: ordered degree list of v1
for v2 in nbs:
            lists_v2 = degreeList[v2]  # lists_v2: ordered degree list of v2
max_layer = min(len(lists_v1), len(lists_v2)) # valid layer
dtw_dist[v1, v2] = {}
for layer in range(0, max_layer):
dist, path = fastdtw(
lists_v1[layer], lists_v2[layer], radius=1, dist=dist_func)
dtw_dist[v1, v2][layer] = dist
return dtw_dist
| 14,604 | 32.574713 | 282 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/__init__.py | from .deepwalk import DeepWalk
from .node2vec import Node2Vec
from .line import LINE
from .sdne import SDNE
from .struc2vec import Struc2Vec
__all__ = ["DeepWalk", "Node2Vec", "LINE", "SDNE", "Struc2Vec"]
| 207 | 22.111111 | 63 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_node_embedding/ge/models/line.py | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Tang J, Qu M, Wang M, et al. Line: Large-scale information network embedding[C]//Proceedings of the 24th International Conference on World Wide Web. International World Wide Web Conferences Steering Committee, 2015: 1067-1077.(https://arxiv.org/pdf/1503.03578.pdf)
"""
import math
import random
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Embedding, Input, Lambda
from tensorflow.python.keras.models import Model
from ..alias import create_alias_table, alias_sample
from ..utils import preprocess_nxgraph
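# Note (added for clarity, not part of the original file): line_loss implements
# LINE's negative-sampling objective. batch_iter yields y_true = +1 for an
# observed edge and -1 for a sampled negative pair, so the loss below equals
# -mean(log sigmoid(u.v)) on positives and -mean(log sigmoid(-u.v)) on negatives.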
def line_loss(y_true, y_pred):
return -K.mean(K.log(K.sigmoid(y_true*y_pred)))
def create_model(numNodes, embedding_size, order='second'):
v_i = Input(shape=(1,))
v_j = Input(shape=(1,))
first_emb = Embedding(numNodes, embedding_size, name='first_emb')
second_emb = Embedding(numNodes, embedding_size, name='second_emb')
context_emb = Embedding(numNodes, embedding_size, name='context_emb')
v_i_emb = first_emb(v_i)
v_j_emb = first_emb(v_j)
v_i_emb_second = second_emb(v_i)
v_j_context_emb = context_emb(v_j)
first = Lambda(lambda x: tf.reduce_sum(
x[0]*x[1], axis=-1, keep_dims=False), name='first_order')([v_i_emb, v_j_emb])
second = Lambda(lambda x: tf.reduce_sum(
x[0]*x[1], axis=-1, keep_dims=False), name='second_order')([v_i_emb_second, v_j_context_emb])
if order == 'first':
output_list = [first]
elif order == 'second':
output_list = [second]
else:
output_list = [first, second]
model = Model(inputs=[v_i, v_j], outputs=output_list)
return model, {'first': first_emb, 'second': second_emb}
class LINE:
def __init__(self, graph, embedding_size=8, negative_ratio=5, order='second',):
"""
:param graph:
:param embedding_size:
:param negative_ratio:
:param order: 'first','second','all'
"""
if order not in ['first', 'second', 'all']:
            raise ValueError("order must be 'first', 'second', or 'all'")
self.graph = graph
self.idx2node, self.node2idx = preprocess_nxgraph(graph)
self.use_alias = True
self.rep_size = embedding_size
self.order = order
self._embeddings = {}
self.negative_ratio = negative_ratio
self.order = order
self.node_size = graph.number_of_nodes()
self.edge_size = graph.number_of_edges()
self.samples_per_epoch = self.edge_size*(1+negative_ratio)
self._gen_sampling_table()
self.reset_model()
def reset_training_config(self, batch_size, times):
self.batch_size = batch_size
self.steps_per_epoch = (
(self.samples_per_epoch - 1) // self.batch_size + 1)*times
def reset_model(self, opt='adam'):
self.model, self.embedding_dict = create_model(
self.node_size, self.rep_size, self.order)
self.model.compile(opt, line_loss)
self.batch_it = self.batch_iter(self.node2idx)
def _gen_sampling_table(self):
# create sampling table for vertex
power = 0.75
numNodes = self.node_size
node_degree = np.zeros(numNodes) # out degree
node2idx = self.node2idx
for edge in self.graph.edges():
node_degree[node2idx[edge[0]]
] += self.graph[edge[0]][edge[1]].get('weight', 1.0)
total_sum = sum([math.pow(node_degree[i], power)
for i in range(numNodes)])
norm_prob = [float(math.pow(node_degree[j], power)) /
total_sum for j in range(numNodes)]
self.node_accept, self.node_alias = create_alias_table(norm_prob)
# create sampling table for edge
numEdges = self.graph.number_of_edges()
total_sum = sum([self.graph[edge[0]][edge[1]].get('weight', 1.0)
for edge in self.graph.edges()])
norm_prob = [self.graph[edge[0]][edge[1]].get('weight', 1.0) *
numEdges / total_sum for edge in self.graph.edges()]
self.edge_accept, self.edge_alias = create_alias_table(norm_prob)
def batch_iter(self, node2idx):
edges = [(node2idx[x[0]], node2idx[x[1]]) for x in self.graph.edges()]
data_size = self.graph.number_of_edges()
shuffle_indices = np.random.permutation(np.arange(data_size))
# positive or negative mod
mod = 0
mod_size = 1 + self.negative_ratio
h = []
t = []
sign = 0
count = 0
start_index = 0
end_index = min(start_index + self.batch_size, data_size)
while True:
if mod == 0:
h = []
t = []
for i in range(start_index, end_index):
if random.random() >= self.edge_accept[shuffle_indices[i]]:
shuffle_indices[i] = self.edge_alias[shuffle_indices[i]]
cur_h = edges[shuffle_indices[i]][0]
cur_t = edges[shuffle_indices[i]][1]
h.append(cur_h)
t.append(cur_t)
sign = np.ones(len(h))
else:
sign = np.ones(len(h))*-1
t = []
for i in range(len(h)):
t.append(alias_sample(
self.node_accept, self.node_alias))
if self.order == 'all':
yield ([np.array(h), np.array(t)], [sign, sign])
else:
yield ([np.array(h), np.array(t)], [sign])
mod += 1
mod %= mod_size
if mod == 0:
start_index = end_index
end_index = min(start_index + self.batch_size, data_size)
if start_index >= data_size:
count += 1
mod = 0
h = []
shuffle_indices = np.random.permutation(np.arange(data_size))
start_index = 0
end_index = min(start_index + self.batch_size, data_size)
def get_embeddings(self,):
self._embeddings = {}
if self.order == 'first':
embeddings = self.embedding_dict['first'].get_weights()[0]
elif self.order == 'second':
embeddings = self.embedding_dict['second'].get_weights()[0]
else:
embeddings = np.hstack((self.embedding_dict['first'].get_weights()[
0], self.embedding_dict['second'].get_weights()[0]))
idx2node = self.idx2node
for i, embedding in enumerate(embeddings):
self._embeddings[idx2node[i]] = embedding
return self._embeddings
def train(self, batch_size=1024, epochs=1, initial_epoch=0, verbose=1, times=1):
self.reset_training_config(batch_size, times)
hist = self.model.fit_generator(self.batch_it, epochs=epochs, initial_epoch=initial_epoch, steps_per_epoch=self.steps_per_epoch,
verbose=verbose)
return hist
| 7,184 | 32.574766 | 272 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_utils/utils.py | import os
import errno
import numpy as np
import pandas as pd
import networkx as nx
import torch
from scipy.sparse import coo_matrix
from tqdm import tqdm
def graph_reader(path):
"""
Function to read the graph from the path.
:param path: Path to the edge list.
:return graph: NetworkX object returned.
"""
graph = nx.from_edgelist(pd.read_csv(path).values.tolist())
return graph
def feature_reader(path):
"""
Reading the sparse feature matrix stored as csv from the disk.
:param path: Path to the csv file.
:return features: Dense matrix of features.
"""
features = pd.read_csv(path)
node_index = features["node_id"].values.tolist()
feature_index = features["feature_id"].values.tolist()
feature_values = features["value"].values.tolist()
node_count = max(node_index) + 1
feature_count = max(feature_index) + 1
features = coo_matrix((feature_values, (node_index, feature_index)), shape=(node_count, feature_count)).toarray()
return features
def target_reader(path):
"""
Reading the target vector from disk.
:param path: Path to the target.
:return target: Target vector.
"""
target = np.array(pd.read_csv(path)["target"]).reshape(-1, 1)
return target
def make_adjacency(graph, max_degree, sel=None):
all_nodes = np.array(graph.nodes())
# Initialize w/ links to a dummy node
n_nodes = len(all_nodes)
adj = (np.zeros((n_nodes + 1, max_degree)) + n_nodes).astype(int)
if sel is not None:
# only look at nodes in training set
all_nodes = all_nodes[sel]
for node in tqdm(all_nodes):
neibs = np.array(list(graph.neighbors(node)))
if sel is not None:
neibs = neibs[sel[neibs]]
if len(neibs) > 0:
if len(neibs) > max_degree:
neibs = np.random.choice(neibs, max_degree, replace=False)
elif len(neibs) < max_degree:
extra = np.random.choice(neibs, max_degree - neibs.shape[0], replace=True)
neibs = np.concatenate([neibs, extra])
adj[node, :] = neibs
return adj
def connected_component_subgraphs(graph):
"""
Find all connected subgraphs in a networkx Graph
Args:
graph (Graph): A networkx Graph
Yields:
generator: A subgraph generator
"""
for c in nx.connected_components(graph):
yield graph.subgraph(c)
def check_exist(file_name):
if not os.path.exists(os.path.dirname(file_name)):
try:
os.makedirs(os.path.dirname(file_name))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def filter_edge_index(edge_index, node_indices, reindex=True):
assert np.all(np.diff(node_indices) >= 0), 'node_indices must be sorted'
if isinstance(edge_index, torch.Tensor):
edge_index = edge_index.cpu()
node_index = np.isin(edge_index, node_indices)
col_index = np.nonzero(np.logical_and(node_index[0], node_index[1]))[0]
edge_index = edge_index[:, col_index]
if reindex:
return np.searchsorted(node_indices, edge_index)
else:
return edge_index
def pyg_to_nx(data):
"""
Convert a torch geometric Data to networkx Graph.
Args:
data (Data): A torch geometric Data.
Returns:
Graph: A networkx Graph.
"""
graph = nx.Graph()
graph.add_nodes_from(np.arange(data.num_nodes))
edge_index = data.edge_index.numpy()
for u, v in np.transpose(edge_index):
graph.add_edge(u, v)
return graph
def edge_index_to_nx(edge_index, num_nodes):
"""
Convert a torch geometric Data to networkx Graph by edge_index.
Args:
edge_index (Data.edge_index): A torch geometric Data.
num_nodes (int): Number of nodes in a graph.
Returns:
Graph: networkx Graph
"""
graph = nx.Graph()
graph.add_nodes_from(np.arange(num_nodes))
edge_index = edge_index.numpy()
for u, v in np.transpose(edge_index):
graph.add_edge(u, v)
return graph
def filter_edge_index_1(data, node_indices):
"""
Remove unnecessary edges from a torch geometric Data, only keep the edges between node_indices.
Args:
data (Data): A torch geometric Data.
node_indices (list): A list of nodes to be deleted from data.
Returns:
data.edge_index: The new edge_index after removing the node_indices.
"""
if isinstance(data.edge_index, torch.Tensor):
data.edge_index = data.edge_index.cpu()
edge_index = data.edge_index
node_index = np.isin(edge_index, node_indices)
col_index = np.nonzero(np.logical_and(node_index[0], node_index[1]))[0]
edge_index = data.edge_index[:, col_index]
return np.searchsorted(node_indices, edge_index)
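if __name__ == '__main__':
    # Small illustration (not part of the original module): keep only the
    # edges whose two endpoints are both in node_indices, then re-index them.
    demo_edge_index = torch.tensor([[0, 1, 2, 3],
                                    [1, 2, 3, 0]])
    demo_node_indices = np.array([0, 1, 2])
    print(filter_edge_index(demo_edge_index, demo_node_indices))
    # expected: [[0 1]
    #            [1 2]]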
| 4,851 | 27.046243 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_utils/logger.py | from texttable import Texttable
def tab_printer(args):
"""
Function to print the logs in a nice tabular format.
:param args: Parameters used for the model.
"""
# args = vars(args)
keys = sorted(args.keys())
t = Texttable()
t.add_rows([["Parameter", "Value"]] + [[k.replace("_"," ").capitalize(),args[k]] for k in keys])
print(t.draw()) | 373 | 30.166667 | 101 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/opt_dataset.py | from torch.utils.data import Dataset
class OptDataset(Dataset):
def __init__(self, posteriors, labels):
self.posteriors = posteriors
self.labels = labels
def __getitem__(self, index):
ret_posterior = {}
for shard, post in self.posteriors.items():
ret_posterior[shard] = post[index]
return ret_posterior, self.labels[index]
def __len__(self):
return self.labels.shape[0]
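if __name__ == '__main__':
    # Illustrative check (not part of the original file): posteriors of two
    # shards for four samples; __getitem__ returns one posterior row per shard
    # together with the corresponding label.
    import torch
    demo_posteriors = {0: torch.rand(4, 3), 1: torch.rand(4, 3)}
    demo_labels = torch.tensor([0, 1, 2, 0])
    demo_dataset = OptDataset(demo_posteriors, demo_labels)
    posterior, label = demo_dataset[1]
    print(sorted(posterior.keys()), label)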
| 448 | 22.631579 | 51 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/optimal_aggregator.py | import copy
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data import DataLoader
from torch_geometric.data import Data
from lib_aggregator.opt_dataset import OptDataset
from lib_dataset.data_store import DataStore
from lib_utils import utils
class OptimalAggregator:
def __init__(self, run, target_model, data, args):
self.logger = logging.getLogger('optimal_aggregator')
self.args = args
self.run = run
self.target_model = target_model
self.data = data
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.num_shards = args['num_shards']
def generate_train_data(self):
data_store = DataStore(self.args)
train_indices, _ = data_store.load_train_test_split()
# sample a set of nodes from train_indices
if self.args["num_opt_samples"] == 1000:
train_indices = np.random.choice(train_indices, size=1000, replace=False)
elif self.args["num_opt_samples"] == 10000:
train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0] * 0.1), replace=False)
elif self.args["num_opt_samples"] == 1:
train_indices = np.random.choice(train_indices, size=int(train_indices.shape[0]), replace=False)
train_indices = np.sort(train_indices)
self.logger.info("Using %s samples for optimization" % (int(train_indices.shape[0])))
x = self.data.x[train_indices]
y = self.data.y[train_indices]
edge_index = utils.filter_edge_index(self.data.edge_index, train_indices)
train_data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
train_data.train_mask = torch.zeros(train_indices.shape[0], dtype=torch.bool)
train_data.test_mask = torch.ones(train_indices.shape[0], dtype=torch.bool)
self.true_labels = y
self.posteriors = {}
for shard in range(self.num_shards):
self.target_model.data = train_data
data_store.load_target_model(self.run, self.target_model, shard)
self.posteriors[shard] = self.target_model.posterior().to(self.device)
def optimization(self):
weight_para = nn.Parameter(torch.full((self.num_shards,), fill_value=1.0 / self.num_shards), requires_grad=True)
optimizer = optim.Adam([weight_para], lr=self.args['opt_lr'])
scheduler = MultiStepLR(optimizer, milestones=[500, 1000], gamma=self.args['opt_lr'])
train_dset = OptDataset(self.posteriors, self.true_labels)
train_loader = DataLoader(train_dset, batch_size=32, shuffle=True, num_workers=0)
min_loss = 1000.0
for epoch in range(self.args['opt_num_epochs']):
loss_all = 0.0
for posteriors, labels in train_loader:
labels = labels.to(self.device)
optimizer.zero_grad()
loss = self._loss_fn(posteriors, labels, weight_para)
loss.backward()
loss_all += loss
optimizer.step()
with torch.no_grad():
weight_para[:] = torch.clamp(weight_para, min=0.0)
scheduler.step()
if loss_all < min_loss:
ret_weight_para = copy.deepcopy(weight_para)
min_loss = loss_all
self.logger.info('epoch: %s, loss: %s' % (epoch, loss_all))
return ret_weight_para / torch.sum(ret_weight_para)
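    # The objective below is the cross-entropy of the weighted sum of shard
    # posteriors against the true labels, plus the L2 norm of the weight
    # vector as a regulariser on the learnable shard weights.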
def _loss_fn(self, posteriors, labels, weight_para):
aggregate_posteriors = torch.zeros_like(posteriors[0])
for shard in range(self.num_shards):
aggregate_posteriors += weight_para[shard] * posteriors[shard]
aggregate_posteriors = F.softmax(aggregate_posteriors, dim=1)
loss_1 = F.cross_entropy(aggregate_posteriors, labels)
loss_2 = torch.sqrt(torch.sum(weight_para ** 2))
return loss_1 + loss_2
| 4,054 | 37.990385 | 120 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/aggregator.py | import logging
import torch
torch.cuda.empty_cache()
from sklearn.metrics import f1_score
import numpy as np
from lib_aggregator.optimal_aggregator import OptimalAggregator
from lib_dataset.data_store import DataStore
class Aggregator:
def __init__(self, run, target_model, data, shard_data, args):
self.logger = logging.getLogger('Aggregator')
self.args = args
self.data_store = DataStore(self.args)
self.run = run
self.target_model = target_model
self.data = data
self.shard_data = shard_data
self.num_shards = args['num_shards']
def generate_posterior(self, suffix=""):
self.true_label = self.shard_data[0].y[self.shard_data[0]['test_mask']].detach().cpu().numpy()
self.posteriors = {}
for shard in range(self.args['num_shards']):
self.target_model.data = self.shard_data[shard]
self.data_store.load_target_model(self.run, self.target_model, shard, suffix)
self.posteriors[shard] = self.target_model.posterior()
self.logger.info("Saving posteriors.")
self.data_store.save_posteriors(self.posteriors, self.run, suffix)
def aggregate(self):
if self.args['aggregator'] == 'mean':
aggregate_f1_score = self._mean_aggregator()
elif self.args['aggregator'] == 'optimal':
aggregate_f1_score = self._optimal_aggregator()
elif self.args['aggregator'] == 'majority':
aggregate_f1_score = self._majority_aggregator()
else:
raise Exception("unsupported aggregator.")
return aggregate_f1_score
def _mean_aggregator(self):
posterior = self.posteriors[0]
for shard in range(1, self.num_shards):
posterior += self.posteriors[shard]
posterior = posterior / self.num_shards
return f1_score(self.true_label, posterior.argmax(axis=1).cpu().numpy(), average="micro")
def _majority_aggregator(self):
pred_labels = []
for shard in range(self.num_shards):
pred_labels.append(self.posteriors[shard].argmax(axis=1).cpu().numpy())
pred_labels = np.stack(pred_labels)
pred_label = np.argmax(
np.apply_along_axis(np.bincount, axis=0, arr=pred_labels, minlength=self.posteriors[0].shape[1]), axis=0)
return f1_score(self.true_label, pred_label, average="micro")
def _optimal_aggregator(self):
optimal = OptimalAggregator(self.run, self.target_model, self.data, self.args)
optimal.generate_train_data()
weight_para = optimal.optimization()
self.data_store.save_optimal_weight(weight_para, run=self.run)
posterior = self.posteriors[0] * weight_para[0]
for shard in range(1, self.num_shards):
posterior += self.posteriors[shard] * weight_para[shard]
return f1_score(self.true_label, posterior.argmax(axis=1).cpu().numpy(), average="micro")
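if __name__ == '__main__':
    # Standalone illustration (not part of the original file) of the majority
    # voting used in _majority_aggregator: three shards vote on four test
    # nodes, np.bincount counts the votes per class and argmax picks the
    # per-node majority class.
    votes = np.array([[0, 1, 2, 1],
                      [0, 2, 2, 1],
                      [1, 2, 2, 0]])
    counts = np.apply_along_axis(np.bincount, axis=0, arr=votes, minlength=3)
    print(np.argmax(counts, axis=0))  # -> [0 2 2 1]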
| 2,958 | 35.9875 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_aggregator/__init__.py | 0 | 0 | 0 | py |
|
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/partition_lpa.py | import math
import numpy as np
import networkx as nx
import logging
import pickle
from lib_graph_partition.constrained_lpa_base import ConstrainedLPABase
from lib_graph_partition.partition import Partition
from lib_graph_partition.constrained_lpa import ConstrainedLPA
import config
class PartitionLPA(Partition):
def __init__(self, args, graph):
super(PartitionLPA, self).__init__(args, graph)
self.logger = logging.getLogger('partition_lpa')
def partition(self):
# implement LPA by hand, refer to https://github.com/benedekrozemberczki/LabelPropagation
community_generator = nx.algorithms.community.label_propagation.label_propagation_communities(self.graph)
self.logger.info("Generating LPA communities.")
community_to_node = {key: c for key, c in zip(range(self.graph.number_of_nodes()), community_generator)}
print("Found %s communities by unconstrained LPA", len(community_to_node.keys()))
return community_to_node
class PartitionConstrainedLPA(Partition):
def __init__(self, args, graph):
super(PartitionConstrainedLPA, self).__init__(args, graph)
self.args = args
self.logger = logging.getLogger('partition_constrained_lpa')
def partition(self):
        adj_array = nx.linalg.adj_matrix(self.graph).toarray().astype(bool)
# node_threshold = math.ceil(self.graph.number_of_nodes() / self.args['num_shards']) + 0.05 * self.graph.number_of_nodes()
# node_threshold = math.ceil(self.graph.number_of_nodes() / self.args['num_shards'])
node_threshold = math.ceil(self.graph.number_of_nodes() / self.args['num_shards'] +
self.args['shard_size_delta'] * (self.graph.number_of_nodes()-self.graph.number_of_nodes() / self.args['num_shards']))
self.logger.info(" #. nodes: %s. LPA shard threshold: %s." % (self.graph.number_of_nodes(), node_threshold))
lpa = ConstrainedLPA(adj_array, self.num_shards, node_threshold, self.args['terminate_delta'])
lpa.initialization()
community_to_node, lpa_deltas = lpa.community_detection()
pickle.dump(lpa_deltas, open(config.ANALYSIS_PATH + "partition/blpa_" + self.args['dataset_name'], 'wb'))
return self.idx2id(community_to_node, np.array(self.graph.nodes))
class PartitionConstrainedLPABase(Partition):
def __init__(self, args, graph):
super(PartitionConstrainedLPABase, self).__init__(args, graph)
self.args = args
self.logger = logging.getLogger('partition_constrained_lpa')
def partition(self):
        adj_array = nx.linalg.adj_matrix(self.graph).toarray().astype(bool)
node_threshold = math.ceil(self.graph.number_of_nodes() / self.args['num_shards'] + self.args['shard_size_delta'] * (self.graph.number_of_nodes()-self.graph.number_of_nodes() / self.args['num_shards']))
self.logger.info(" #. nodes: %s. LPA shard threshold: %s." % (self.graph.number_of_nodes(), node_threshold))
lpa = ConstrainedLPABase(adj_array, self.num_shards, node_threshold, self.args['terminate_delta'])
lpa.initialization()
community_to_node, lpa_deltas = lpa.community_detection()
pickle.dump(lpa_deltas, open(config.ANALYSIS_PATH + "partition/base_blpa_" + self.args['dataset_name'], 'wb'))
return self.idx2id(community_to_node, np.array(self.graph.nodes))
| 3,406 | 45.671233 | 210 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/constrained_kmeans.py | import logging
import copy
from tqdm import tqdm
import cupy as np  # cupy is used as a drop-in, GPU-backed replacement for numpy here
class ConstrainedKmeans:
def __init__(self, data_feat, num_clusters, node_threshold, terminate_delta, max_iteration=20):
self.logger = logging.getLogger('constrained_kmeans')
self.data_feat = data_feat
self.num_clusters = num_clusters
self.node_threshold = node_threshold
self.terminate_delta = terminate_delta
self.max_iteration = max_iteration
def initialization(self):
centroids = np.random.choice(np.arange(self.data_feat.shape[0]), self.num_clusters, replace=False)
self.centroid = {}
for i in range(self.num_clusters):
self.centroid[i] = self.data_feat[centroids[i].get()]
def clustering(self):
centroid = copy.deepcopy(self.centroid)
km_delta = []
pbar = tqdm(total=self.max_iteration)
pbar.set_description('Clustering')
for i in range(self.max_iteration):
self.logger.info('iteration %s' % (i,))
self._node_reassignment()
self._centroid_updating()
# record the average change of centroids, if the change is smaller than a very small value, then terminate
delta = self._centroid_delta(centroid, self.centroid)
km_delta.append(delta)
centroid = copy.deepcopy(self.centroid)
if delta <= self.terminate_delta:
break
self.logger.info("delta: %s" % delta)
pbar.close()
return self.clusters, km_delta
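    # Greedy balanced assignment: all (cluster, node) pairs are sorted by the
    # node-to-centroid distance and consumed in that order, and a node is put
    # into its closest cluster that still has fewer than `node_threshold`
    # members, so no shard can exceed the size budget.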
def _node_reassignment(self):
self.clusters = {}
for i in range(self.num_clusters):
self.clusters[i] = np.zeros(0, dtype=np.uint64)
distance = np.zeros([self.num_clusters, self.data_feat.shape[0]])
for i in range(self.num_clusters):
distance[i] = np.sum(np.power((self.data_feat - self.centroid[i]), 2), axis=1)
sort_indices = np.unravel_index(np.argsort(distance, axis=None), distance.shape)
clusters = sort_indices[0]
users = sort_indices[1]
selected_nodes = np.zeros(0, dtype=np.int64)
counter = 0
while len(selected_nodes) < self.data_feat.shape[0]:
cluster = int(clusters[counter])
user = users[counter]
if self.clusters[cluster].size < self.node_threshold:
self.clusters[cluster] = np.append(self.clusters[cluster], np.array(int(user)))
selected_nodes = np.append(selected_nodes, np.array(int(user)))
# delete all the following pairs for the selected user
user_indices = np.where(users == user)[0]
a = np.arange(users.size)
b = user_indices[user_indices > counter]
remain_indices = a[np.where(np.logical_not(np.isin(a, b)))[0]]
clusters = clusters[remain_indices]
users = users[remain_indices]
counter += 1
def _centroid_updating(self):
for i in range(self.num_clusters):
self.centroid[i] = np.mean(self.data_feat[self.clusters[i].astype(int)], axis=0)
def _centroid_delta(self, centroid_pre, centroid_cur):
delta = 0.0
for i in range(len(centroid_cur)):
delta += np.sum(np.abs(centroid_cur[i] - centroid_pre[i]))
return delta
if __name__ == '__main__':
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
data_feat = np.array([[1, 2],
[1, 3],
[1, 4],
[1, 5],
[10, 2],
[10, 3]])
num_clusters = 2
node_threshold = 3
terminate_delta = 0.001
cluster = ConstrainedKmeans(data_feat, num_clusters, node_threshold, terminate_delta)
cluster.initialization()
cluster.clustering() | 4,038 | 34.743363 | 118 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/graph_partition.py | import logging
from lib_graph_partition.partition_kmeans import PartitionKMeans
from lib_graph_partition.partition_lpa import PartitionConstrainedLPA, PartitionLPA, PartitionConstrainedLPABase
from lib_graph_partition.metis_partition import MetisPartition
from lib_graph_partition.partition_random import PartitionRandom
class GraphPartition:
def __init__(self, args, graph, dataset=None):
self.logger = logging.getLogger(__name__)
self.args = args
self.graph = graph
self.dataset = dataset
self.partition_method = self.args['partition_method']
self.num_shards = self.args['num_shards']
def graph_partition(self):
self.logger.info('graph partition, method: %s' % self.partition_method)
if self.partition_method == 'random':
partition_method = PartitionRandom(self.args, self.graph)
elif self.partition_method in ['sage_km', 'sage_km_base']:
partition_method = PartitionKMeans(self.args, self.graph, self.dataset)
elif self.partition_method == 'lpa' and not self.args['is_constrained']:
partition_method = PartitionLPA(self.args, self.graph)
elif self.partition_method == 'lpa' and self.args['is_constrained']:
partition_method = PartitionConstrainedLPA(self.args, self.graph)
elif self.partition_method == 'lpa_base':
partition_method = PartitionConstrainedLPABase(self.args, self.graph)
elif self.partition_method == 'metis':
partition_method = MetisPartition(self.args, self.graph, self.dataset)
else:
raise Exception('Unsupported partition method')
return partition_method.partition()
| 1,708 | 42.820513 | 112 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/constrained_kmeans_base.py | # An implementation of ``Balanced K-Means for Clustering.'' (https://rdcu.be/cESzk)
import logging
import copy
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from munkres import Munkres
from lib_graph_partition.hungarian import Hungarian
from lib_graph_partition.hungarian_1 import KMMatcher
class ConstrainedKmeansBase:
def __init__(self, data_feat, num_clusters, node_threshold, terminate_delta, max_iteration=20):
self.logger = logging.getLogger('constrained_kmeans_base')
self.data_feat = data_feat
self.num_clusters = num_clusters
self.node_threshold = node_threshold
self.terminate_delta = terminate_delta
self.max_iteration = max_iteration
def initialization(self):
centroids = np.random.choice(np.arange(self.data_feat.shape[0]), self.num_clusters, replace=False)
self.centroid = dict(zip(range(self.num_clusters), self.data_feat[centroids]))
def clustering(self):
centroid = copy.deepcopy(self.centroid)
centroid_delta = {}
km_base_delta = []
for i in range(self.max_iteration):
self.logger.info('iteration %s' % (i))
self._node_reassignment()
self._centroid_updating()
# record the average change of centroids, if the change is smaller than a very small value, then terminate
delta = self._centroid_delta(centroid, self.centroid)
centroid_delta[i] = delta
km_base_delta.append(delta)
centroid = copy.deepcopy(self.centroid)
if delta <= self.terminate_delta:
break
self.logger.info("delta: %s" % delta)
return self.clusters, km_base_delta
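    # Balanced assignment as a linear assignment problem: row r of the tiled
    # cost matrix holds every node's distance to centroid (r % num_clusters),
    # so the node "slots" are split (almost) evenly across clusters and the
    # matcher assigns each node to exactly one slot.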
def _node_reassignment(self):
self.logger.info('Node reassignment begins')
self.clusters = dict(
zip(np.arange(self.num_clusters), [np.zeros(0, dtype=np.uint64) for _ in range(self.num_clusters)]))
distance = np.zeros([self.num_clusters, self.data_feat.shape[0]])
# cost_matrix = np.zeros([self.data_feat.shape[0], self.data_feat.shape[0]])
for i in range(self.num_clusters):
distance[i] = np.sum((self.data_feat - self.centroid[i]) ** 2, axis=1)
cost_matrix = np.tile(distance, (self.data_feat.shape[0], 1))
cost_matrix = cost_matrix[:self.data_feat.shape[0], :]
# too slow
# matrix = np.array(cost_matrix)
# m = Munkres()
# assignment = m.compute(matrix)
# assignment = np.array(assignment)
# assignment = assignment[:, 1]
# hungarian = Hungarian(cost_matrix)
# hungarian.calculate()
# assignment = hungarian.get_results()
# assignment = np.array(assignment)
# assignment = assignment[np.argsort(assignment[:, 0])]
# assignment = assignment[:, 1]
matcher = KMMatcher(cost_matrix)
assignment, _ = matcher.solve()
partition = np.zeros(self.data_feat.shape[0])
for i in range(self.data_feat.shape[0]):
partition[assignment[i]] = i % self.num_clusters
for i in range(self.num_clusters):
self.clusters[i] = np.where(partition == i)[0]
def _centroid_updating(self):
self.logger.info('Updating centroid begins')
for i in range(self.num_clusters):
self.centroid[i] = np.mean(self.data_feat[self.clusters[i]], axis=0)
def _centroid_delta(self, centroid_pre, centroid_cur):
delta = 0.0
for i in range(len(centroid_cur)):
delta += np.sum(np.abs(centroid_cur[i] - centroid_pre[i]))
return delta
if __name__ == '__main__':
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
data_feat = np.array([[1, 2],
[1, 3],
[1, 4],
[1, 5],
[10, 2],
[10, 3]])
num_clusters = 2
node_threshold = 3
terminate_delta = 0.001
cluster = ConstrainedKmeansBase(data_feat, num_clusters, node_threshold, terminate_delta)
cluster.initialization()
cluster.clustering()
| 4,307 | 35.820513 | 118 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/metis_partition.py | import numpy as np
import networkx as nx
import pymetis
from torch_geometric.data import ClusterData
from torch_geometric.utils import from_networkx
from lib_graph_partition.partition import Partition
class MetisPartition(Partition):
def __init__(self, args, graph, dataset):
super(MetisPartition, self).__init__(args, graph, dataset)
self.graph = graph
self.args = args
self.data = dataset
def partition(self, recursive=False):
# recursive (bool, optional): If set to :obj:`True`, will use multilevel
# recursive bisection instead of multilevel k-way partitioning.
# (default: :obj:`False`)
# only use train data, not the whole dataset
self.train_data = from_networkx(self.graph)
data = ClusterData(self.train_data, self.args['num_shards'], recursive=recursive)
community_to_node = {}
for i in range(self.args['num_shards']):
community_to_node[i] = [*range(data.partptr[i], data.partptr[i+1], 1)]
# map node back to original graph
for com in range(self.args['num_shards']):
community_to_node[com] = np.array(list(self.graph.nodes))[data.partptr.numpy()[com]:data.partptr.numpy()[com+1]]
return community_to_node
class PyMetisPartition(Partition):
def __init__(self, args, graph, dataset):
super(PyMetisPartition, self).__init__(args, graph, dataset)
self.graph = graph
self.args = args
self.data = dataset
def partition(self, recursive=False):
# recursive (bool, optional): If set to :obj:`True`, will use multilevel
# recursive bisection instead of multilevel k-way partitioning.
# (default: :obj:`False`)
# only use train data, not the whole dataset
# map graph into new graph
mapping = {}
for i, node in enumerate(self.graph.nodes):
mapping[node] = i
partition_graph = nx.relabel_nodes(self.graph, mapping=mapping)
adj_list = []
for line in nx.generate_adjlist(partition_graph):
line_int = list(map(int, line.split()))
adj_list.append(np.array(line_int))
n_cuts, membership = pymetis.part_graph(self.args['num_shards'], adjacency=adj_list)
# map node back to original graph
community_to_node = {}
for shard_index in range(self.args['num_shards']):
community_to_node[shard_index] = np.array([node_id for node_id, node_shard_index in zip(list(mapping.keys()), membership) if node_shard_index == shard_index])
return community_to_node
| 2,609 | 38.545455 | 170 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/constrained_lpa.py | import copy
import logging
from collections import defaultdict
import numpy as np
class ConstrainedLPA:
def __init__(self, adj, num_communities, node_threshold, terminate_delta):
self.logger = logging.getLogger('constrained_lpa_single')
self.adj = adj
self.num_nodes = adj.shape[0]
self.num_communities = num_communities
self.node_threshold = node_threshold
self.terminate_delta = terminate_delta
def initialization(self):
self.logger.info('initializing communities')
random_nodes = np.arange(self.num_nodes)
np.random.shuffle(random_nodes)
self.communities = defaultdict(set)
self.node_community = np.zeros(self.adj.shape[0])
# each node use node is as its community label
for community, nodes in enumerate(np.array_split(random_nodes, self.num_communities)):
self.communities[community] = set(nodes)
self.node_community[nodes] = community
def community_detection(self, iterations=100):
self.logger.info('detecting communities')
communities = copy.deepcopy(self.communities)
lpa_deltas = []
# Currently, break when maximum iterations round achieves.
for i in range(iterations):
self.logger.info('iteration %s' % (i,))
desire_move = self._determine_desire_move()
sort_indices = np.flip(np.argsort(desire_move[:, 2]))
candidate_nodes = defaultdict(list)
# allocate nodes' community with descending order of colocate count
for node in sort_indices:
src_community = desire_move[node][0]
dst_community = desire_move[node][1]
if src_community != dst_community:
if len(self.communities[dst_community]) < self.node_threshold:
self.node_community[node] = dst_community
self.communities[dst_community].add(node)
self.communities[src_community].remove(node)
# reallocate the candidate nodes
candidate_nodes_cur = candidate_nodes[src_community]
while len(candidate_nodes_cur) != 0:
node_cur = candidate_nodes_cur[0]
src_community_cur = desire_move[node_cur][0]
dst_community_cur = desire_move[node_cur][1]
self.node_community[node_cur] = dst_community_cur
self.communities[dst_community_cur].add(node_cur)
self.communities[src_community_cur].remove(node_cur)
candidate_nodes[dst_community_cur].pop(0)
candidate_nodes_cur = candidate_nodes[src_community_cur]
else:
candidate_nodes[dst_community].append(node)
# record the communities of each iteration, break the loop while communities are stable.
delta = self._lpa_delta(communities, self.communities)
lpa_deltas.append(delta)
self.logger.info("%d" % delta)
communities = copy.deepcopy(self.communities)
if delta <= self.terminate_delta:
break
return self.communities, lpa_deltas
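    # desire_move[i] stores (current community of node i, the most frequent
    # community among i's neighbours, that frequency); ties between equally
    # frequent communities are broken uniformly at random.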
def _determine_desire_move(self):
desire_move = np.zeros([self.num_nodes, 3])
desire_move[:, 0] = self.node_community
for i in range(self.num_nodes):
# neighbor_community = self.node_community[np.nonzero(self.adj[i])[0]] # for non-bool adj
neighbor_community = self.node_community[self.adj[i]] # for bool adj
unique_community, unique_count = np.unique(neighbor_community, return_counts=True)
if unique_community.shape[0] == 0:
continue
max_indices = np.where(unique_count == np.max(unique_count))[0]
if max_indices.size == 1:
desire_move[i, 1] = unique_community[max_indices]
desire_move[i, 2] = unique_count[max_indices]
elif max_indices.size > 1:
max_index = np.random.choice(max_indices)
desire_move[i, 1] = unique_community[max_index]
desire_move[i, 2] = unique_count[max_index]
return desire_move
def _lpa_delta(self, lpa_pre, lpa_cur):
delta = 0.0
for i in range(len(lpa_cur)):
delta += len((lpa_cur[i] | lpa_pre[i]) - (lpa_cur[i] & lpa_pre[i]))
return delta
if __name__ == '__main__':
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
adj = np.array([[0, 1, 1],
[1, 0, 1],
[1, 1, 0]],
                   dtype=bool)
num_communities = 2
node_threshold = 3
terminate_delta = 1
lpa = ConstrainedLPA(adj, num_communities, node_threshold, terminate_delta)
lpa.initialization()
lpa.community_detection()
| 5,167 | 38.450382 | 104 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/partition_kmeans.py | import math
import pickle
import cupy as cp
import numpy as np
import logging
from sklearn.cluster import KMeans
import config
from lib_graph_partition.constrained_kmeans_base import ConstrainedKmeansBase
from lib_graph_partition.partition import Partition
from lib_graph_partition.constrained_kmeans import ConstrainedKmeans
from lib_node_embedding.node_embedding import NodeEmbedding
class PartitionKMeans(Partition):
def __init__(self, args, graph, dataset):
super(PartitionKMeans, self).__init__(args, graph, dataset)
self.logger = logging.getLogger('partition_kmeans')
cp.cuda.Device(self.args['cuda']).use()
self.load_embeddings()
def load_embeddings(self):
node_embedding = NodeEmbedding(self.args, self.graph, self.dataset)
if self.partition_method in ["sage_km", "sage_km_base"]:
self.node_to_embedding = node_embedding.sage_encoder()
else:
raise Exception('unsupported embedding method')
def partition(self):
self.logger.info("partitioning")
embedding = []
for node in self.node_to_embedding.keys():
embedding.append(self.node_to_embedding[node])
if not self.args['is_constrained']:
cluster = KMeans(n_clusters=self.num_shards, random_state=10)
cluster_labels = cluster.fit_predict(embedding)
node_to_community = {}
for com, node in zip(cluster_labels, self.node_to_embedding.keys()):
node_to_community[node] = com
community_to_node = {}
for com in range(len(set(node_to_community.values()))):
community_to_node[com] = np.where(np.array(list(node_to_community.values())) == com)[0]
community_to_node = dict(sorted(community_to_node.items()))
else:
# node_threshold = math.ceil(self.graph.number_of_nodes() / self.num_shards)
# node_threshold = math.ceil(self.graph.number_of_nodes() / self.num_shards + 0.05*self.graph.number_of_nodes())
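            # per-shard node budget: the balanced shard size plus a slack controlled by shard_size_delta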
node_threshold = math.ceil(
self.graph.number_of_nodes() / self.args['num_shards'] + self.args['shard_size_delta'] * (
self.graph.number_of_nodes() - self.graph.number_of_nodes() / self.args['num_shards']))
self.logger.info("#.nodes: %s. Shard threshold: %s." % (self.graph.number_of_nodes(), node_threshold))
if self.partition_method == 'sage_km_base':
cluster = ConstrainedKmeansBase(np.array(embedding), num_clusters=self.num_shards,
node_threshold=node_threshold,
terminate_delta=self.args['terminate_delta'])
cluster.initialization()
community, km_deltas = cluster.clustering()
pickle.dump(km_deltas, open(config.ANALYSIS_PATH + "partition/base_bkm_" + self.args['dataset_name'], 'wb'))
community_to_node = {}
for i in range(self.num_shards):
community_to_node[i] = np.array(community[i])
if self.partition_method == 'sage_km':
cluster = ConstrainedKmeans(cp.array(embedding), num_clusters=self.num_shards,
node_threshold=node_threshold,
terminate_delta=self.args['terminate_delta'])
cluster.initialization()
community, km_deltas = cluster.clustering()
pickle.dump(km_deltas, open(config.ANALYSIS_PATH + "partition/bkm_" + self.args['dataset_name'], 'wb'))
community_to_node = {}
for i in range(self.num_shards):
community_to_node[i] = np.array(community[i].get().astype(int))
return community_to_node
| 3,881 | 43.62069 | 124 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/hungarian.py | #!/usr/bin/python
"""
Implementation of the Hungarian (Munkres) Algorithm using Python and NumPy
References: http://www.ams.jhu.edu/~castello/362/Handouts/hungarian.pdf
http://weber.ucsd.edu/~vcrawfor/hungar.pdf
http://en.wikipedia.org/wiki/Hungarian_algorithm
http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html
http://www.clapper.org/software/python/munkres/
"""
# Module Information.
__version__ = "1.1.1"
__author__ = "Thom Dedecko"
__url__ = "http://github.com/tdedecko/hungarian-algorithm"
__copyright__ = "(c) 2010 Thom Dedecko"
__license__ = "MIT License"
class HungarianError(Exception):
pass
# Import numpy. Error if fails
try:
import numpy as np
except ImportError:
raise HungarianError("NumPy is not installed.")
class Hungarian:
"""
Implementation of the Hungarian (Munkres) Algorithm using np.
Usage:
hungarian = Hungarian(cost_matrix)
hungarian.calculate()
or
hungarian = Hungarian()
hungarian.calculate(cost_matrix)
Handle Profit matrix:
hungarian = Hungarian(profit_matrix, is_profit_matrix=True)
or
cost_matrix = Hungarian.make_cost_matrix(profit_matrix)
The matrix will be automatically padded if it is not square.
    For that numpy's pad function is used, which automatically fills any added row/column with 0's.
Get results and total potential after calculation:
hungarian.get_results()
hungarian.get_total_potential()
"""
def __init__(self, input_matrix=None, is_profit_matrix=False):
"""
input_matrix is a List of Lists.
input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.
"""
if input_matrix is not None:
# Save input
my_matrix = np.array(input_matrix)
self._input_matrix = np.array(input_matrix)
self._maxColumn = my_matrix.shape[1]
self._maxRow = my_matrix.shape[0]
# Adds 0s if any columns/rows are added. Otherwise stays unaltered
matrix_size = max(self._maxColumn, self._maxRow)
            pad_rows = matrix_size - self._maxRow
            pad_columns = matrix_size - self._maxColumn
            my_matrix = np.pad(my_matrix, ((0, pad_rows), (0, pad_columns)), 'constant', constant_values=(0))
# Convert matrix to profit matrix if necessary
if is_profit_matrix:
my_matrix = self.make_cost_matrix(my_matrix)
self._cost_matrix = my_matrix
self._size = len(my_matrix)
self._shape = my_matrix.shape
# Results from algorithm.
self._results = []
self._totalPotential = 0
else:
self._cost_matrix = None
def get_results(self):
"""Get results after calculation."""
return self._results
def get_total_potential(self):
"""Returns expected value after calculation."""
return self._totalPotential
def calculate(self, input_matrix=None, is_profit_matrix=False):
"""
Implementation of the Hungarian (Munkres) Algorithm.
input_matrix is a List of Lists.
input_matrix is assumed to be a cost matrix unless is_profit_matrix is True.
"""
# Handle invalid and new matrix inputs.
if input_matrix is None and self._cost_matrix is None:
raise HungarianError("Invalid input")
elif input_matrix is not None:
self.__init__(input_matrix, is_profit_matrix)
result_matrix = self._cost_matrix.copy()
# Step 1: Subtract row mins from each row.
for index, row in enumerate(result_matrix):
result_matrix[index] -= row.min()
# Step 2: Subtract column mins from each column.
for index, column in enumerate(result_matrix.T):
result_matrix[:, index] -= column.min()
# Step 3: Use minimum number of lines to cover all zeros in the matrix.
# If the total covered rows+columns is not equal to the matrix size then adjust matrix and repeat.
total_covered = 0
while total_covered < self._size:
# Find minimum number of lines to cover all zeros in the matrix and find total covered rows and columns.
cover_zeros = CoverZeros(result_matrix)
covered_rows = cover_zeros.get_covered_rows()
covered_columns = cover_zeros.get_covered_columns()
total_covered = len(covered_rows) + len(covered_columns)
# if the total covered rows+columns is not equal to the matrix size then adjust it by min uncovered num (m).
if total_covered < self._size:
result_matrix = self._adjust_matrix_by_min_uncovered_num(result_matrix, covered_rows, covered_columns)
# Step 4: Starting with the top row, work your way downwards as you make assignments.
# Find single zeros in rows or columns.
# Add them to final result and remove them and their associated row/column from the matrix.
expected_results = min(self._maxColumn, self._maxRow)
zero_locations = (result_matrix == 0)
while len(self._results) != expected_results:
            # If no zeros remain before all results have been found, the algorithm has failed.
if not zero_locations.any():
raise HungarianError("Unable to find results. Algorithm has failed.")
# Find results and mark rows and columns for deletion
matched_rows, matched_columns = self.__find_matches(zero_locations)
# Make arbitrary selection
total_matched = len(matched_rows) + len(matched_columns)
if total_matched == 0:
matched_rows, matched_columns = self.select_arbitrary_match(zero_locations)
# Delete rows and columns
for row in matched_rows:
zero_locations[row] = False
for column in matched_columns:
zero_locations[:, column] = False
# Save Results
self.__set_results(zip(matched_rows, matched_columns))
# Calculate total potential
value = 0
for row, column in self._results:
value += self._input_matrix[row, column]
self._totalPotential = value
@staticmethod
def make_cost_matrix(profit_matrix):
"""
Converts a profit matrix into a cost matrix.
Expects NumPy objects as input.
"""
# subtract profit matrix from a matrix made of the max value of the profit matrix
matrix_shape = profit_matrix.shape
offset_matrix = np.ones(matrix_shape, dtype=int) * profit_matrix.max()
cost_matrix = offset_matrix - profit_matrix
return cost_matrix
def _adjust_matrix_by_min_uncovered_num(self, result_matrix, covered_rows, covered_columns):
"""Subtract m from every uncovered number and add m to every element covered with two lines."""
# Calculate minimum uncovered number (m)
elements = []
for row_index, row in enumerate(result_matrix):
if row_index not in covered_rows:
for index, element in enumerate(row):
if index not in covered_columns:
elements.append(element)
min_uncovered_num = min(elements)
# Add m to every covered element
adjusted_matrix = result_matrix
for row in covered_rows:
adjusted_matrix[row] += min_uncovered_num
for column in covered_columns:
adjusted_matrix[:, column] += min_uncovered_num
# Subtract m from every element
m_matrix = np.ones(self._shape, dtype=int) * min_uncovered_num
adjusted_matrix -= m_matrix
return adjusted_matrix
def __find_matches(self, zero_locations):
"""Returns rows and columns with matches in them."""
marked_rows = np.array([], dtype=int)
marked_columns = np.array([], dtype=int)
# Mark rows and columns with matches
# Iterate over rows
for index, row in enumerate(zero_locations):
row_index = np.array([index])
if np.sum(row) == 1:
column_index, = np.where(row)
marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,
column_index)
# Iterate over columns
for index, column in enumerate(zero_locations.T):
column_index = np.array([index])
if np.sum(column) == 1:
row_index, = np.where(column)
marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index,
column_index)
return marked_rows, marked_columns
@staticmethod
def __mark_rows_and_columns(marked_rows, marked_columns, row_index, column_index):
"""Check if column or row is marked. If not marked then mark it."""
new_marked_rows = marked_rows
new_marked_columns = marked_columns
if not (marked_rows == row_index).any() and not (marked_columns == column_index).any():
new_marked_rows = np.insert(marked_rows, len(marked_rows), row_index)
new_marked_columns = np.insert(marked_columns, len(marked_columns), column_index)
return new_marked_rows, new_marked_columns
@staticmethod
def select_arbitrary_match(zero_locations):
"""Selects row column combination with minimum number of zeros in it."""
# Count number of zeros in row and column combinations
rows, columns = np.where(zero_locations)
zero_count = []
for index, row in enumerate(rows):
total_zeros = np.sum(zero_locations[row]) + np.sum(zero_locations[:, columns[index]])
zero_count.append(total_zeros)
# Get the row column combination with the minimum number of zeros.
indices = zero_count.index(min(zero_count))
row = np.array([rows[indices]])
column = np.array([columns[indices]])
return row, column
def __set_results(self, result_lists):
"""Set results during calculation."""
# Check if results values are out of bound from input matrix (because of matrix being padded).
# Add results to results list.
for result in result_lists:
row, column = result
if row < self._maxRow and column < self._maxColumn:
new_result = (int(row), int(column))
self._results.append(new_result)
class CoverZeros:
"""
Use minimum number of lines to cover all zeros in the matrix.
Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf
"""
def __init__(self, matrix):
"""
Input a matrix and save it as a boolean matrix to designate zero locations.
Run calculation procedure to generate results.
"""
# Find zeros in matrix
self._zero_locations = (matrix == 0)
self._shape = matrix.shape
# Choices starts without any choices made.
self._choices = np.zeros(self._shape, dtype=bool)
self._marked_rows = []
self._marked_columns = []
# marks rows and columns
self.__calculate()
# Draw lines through all unmarked rows and all marked columns.
self._covered_rows = list(set(range(self._shape[0])) - set(self._marked_rows))
self._covered_columns = self._marked_columns
def get_covered_rows(self):
"""Return list of covered rows."""
return self._covered_rows
def get_covered_columns(self):
"""Return list of covered columns."""
return self._covered_columns
def __calculate(self):
"""
Calculates minimum number of lines necessary to cover all zeros in a matrix.
Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf
"""
while True:
# Erase all marks.
self._marked_rows = []
self._marked_columns = []
# Mark all rows in which no choice has been made.
for index, row in enumerate(self._choices):
if not row.any():
self._marked_rows.append(index)
# If no marked rows then finish.
if not self._marked_rows:
return True
# Mark all columns not already marked which have zeros in marked rows.
num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()
# If no new marked columns then finish.
if num_marked_columns == 0:
return True
# While there is some choice in every marked column.
while self.__choice_in_all_marked_columns():
# Some Choice in every marked column.
# Mark all rows not already marked which have choices in marked columns.
num_marked_rows = self.__mark_new_rows_with_choices_in_marked_columns()
# If no new marks then Finish.
if num_marked_rows == 0:
return True
# Mark all columns not already marked which have zeros in marked rows.
num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows()
# If no new marked columns then finish.
if num_marked_columns == 0:
return True
# No choice in one or more marked columns.
# Find a marked column that does not have a choice.
choice_column_index = self.__find_marked_column_without_choice()
while choice_column_index is not None:
# Find a zero in the column indexed that does not have a row with a choice.
choice_row_index = self.__find_row_without_choice(choice_column_index)
# Check if an available row was found.
new_choice_column_index = None
if choice_row_index is None:
                    # Find a good row to accommodate the swap. Find its column pair.
choice_row_index, new_choice_column_index = \
self.__find_best_choice_row_and_new_column(choice_column_index)
# Delete old choice.
self._choices[choice_row_index, new_choice_column_index] = False
# Set zero to choice.
self._choices[choice_row_index, choice_column_index] = True
# Loop again if choice is added to a row with a choice already in it.
choice_column_index = new_choice_column_index
def __mark_new_columns_with_zeros_in_marked_rows(self):
"""Mark all columns not already marked which have zeros in marked rows."""
num_marked_columns = 0
for index, column in enumerate(self._zero_locations.T):
if index not in self._marked_columns:
if column.any():
row_indices, = np.where(column)
zeros_in_marked_rows = (set(self._marked_rows) & set(row_indices)) != set([])
if zeros_in_marked_rows:
self._marked_columns.append(index)
num_marked_columns += 1
return num_marked_columns
def __mark_new_rows_with_choices_in_marked_columns(self):
"""Mark all rows not already marked which have choices in marked columns."""
num_marked_rows = 0
for index, row in enumerate(self._choices):
if index not in self._marked_rows:
if row.any():
column_index, = np.where(row)
if column_index in self._marked_columns:
self._marked_rows.append(index)
num_marked_rows += 1
return num_marked_rows
def __choice_in_all_marked_columns(self):
"""Return Boolean True if there is a choice in all marked columns. Returns boolean False otherwise."""
for column_index in self._marked_columns:
if not self._choices[:, column_index].any():
return False
return True
def __find_marked_column_without_choice(self):
"""Find a marked column that does not have a choice."""
for column_index in self._marked_columns:
if not self._choices[:, column_index].any():
return column_index
raise HungarianError(
"Could not find a column without a choice. Failed to cover matrix zeros. Algorithm has failed.")
def __find_row_without_choice(self, choice_column_index):
"""Find a row without a choice in it for the column indexed. If a row does not exist then return None."""
row_indices, = np.where(self._zero_locations[:, choice_column_index])
for row_index in row_indices:
if not self._choices[row_index].any():
return row_index
# All rows have choices. Return None.
return None
def __find_best_choice_row_and_new_column(self, choice_column_index):
"""
Find a row index to use for the choice so that the column that needs to be changed is optimal.
Return a random row and column if unable to find an optimal selection.
"""
row_indices, = np.where(self._zero_locations[:, choice_column_index])
for row_index in row_indices:
column_indices, = np.where(self._choices[row_index])
column_index = column_indices[0]
if self.__find_row_without_choice(column_index) is not None:
return row_index, column_index
# Cannot find optimal row and column. Return a random row and column.
from random import shuffle
shuffle(row_indices)
column_index, = np.where(self._choices[row_indices[0]])
return row_indices[0], column_index[0]
if __name__ == '__main__':
profit_matrix = [
[62, 75, 80, 93, 95, 97],
[75, 80, 82, 85, 71, 97],
[80, 75, 81, 98, 90, 97],
[78, 82, 84, 80, 50, 98],
[90, 85, 85, 80, 85, 99],
[65, 75, 80, 75, 68, 96]]
hungarian = Hungarian(profit_matrix, is_profit_matrix=True)
hungarian.calculate()
print("Expected value:\t\t543")
print("Calculated value:\t", hungarian.get_total_potential()) # = 543
print("Expected results:\n\t[(0, 4), (2, 3), (5, 5), (4, 0), (1, 1), (3, 2)]")
print("Results:\n\t", hungarian.get_results())
print("-" * 80)
cost_matrix = [
[4, 2, 8],
[4, 3, 7],
[3, 1, 6]]
hungarian = Hungarian(cost_matrix)
print('calculating...')
hungarian.calculate()
print("Expected value:\t\t12")
print("Calculated value:\t", hungarian.get_total_potential()) # = 12
print("Expected results:\n\t[(0, 1), (1, 0), (2, 2)]")
print("Results:\n\t", hungarian.get_results())
print("-" * 80)
profit_matrix = [
[62, 75, 80, 93, 0, 97],
[75, 0, 82, 85, 71, 97],
[80, 75, 81, 0, 90, 97],
[78, 82, 0, 80, 50, 98],
[0, 85, 85, 80, 85, 99],
[65, 75, 80, 75, 68, 0]]
hungarian = Hungarian()
hungarian.calculate(profit_matrix, is_profit_matrix=True)
print("Expected value:\t\t523")
print("Calculated value:\t", hungarian.get_total_potential()) # = 523
print("Expected results:\n\t[(0, 3), (2, 4), (3, 0), (5, 2), (1, 5), (4, 1)]")
print("Results:\n\t", hungarian.get_results())
print("-" * 80)
| 19,635 | 40.252101 | 120 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/constrained_lpa_base.py | # An implementation of `` Balanced Label Propagation for Partitioning MassiveGraphs'' (https://stanford.edu/~jugander/papers/wsdm13-blp.pdf)
import copy
import logging
from collections import defaultdict
import numpy as np
import cvxpy as cp
from scipy.stats import linregress
class ConstrainedLPABase:
def __init__(self, adj, num_communities, node_threshold, terminate_delta):
self.logger = logging.getLogger('constrained_lpa_base')
self.adj = adj
self.num_nodes = adj.shape[0]
self.num_communities = num_communities
self.node_threshold = node_threshold
self.terminate_delta = terminate_delta
def initialization(self):
self.logger.info('initializing communities')
random_nodes = np.arange(self.num_nodes)
np.random.shuffle(random_nodes)
self.communities = defaultdict(set)
self.node_community = np.zeros(self.adj.shape[0])
        # randomly assign each node to one of num_communities (roughly) equally sized communities
for community, nodes in enumerate(np.array_split(random_nodes, self.num_communities)):
self.communities[community] = set(nodes)
self.node_community[nodes] = community
def community_detection(self, iterations=100):
self.logger.info('detecting communities')
communities = copy.deepcopy(self.communities)
lpa_deltas = []
for i in range(iterations):
self.logger.info('iteration %s' % (i,))
## Step 1: calculate desired move
desire_move = self._determine_desire_move()
relocation = {}
utility_func = {}
## Step 2: calculate parameters for linear programming problem
for src_community in range(self.num_communities):
for dst_community in range(self.num_communities):
move_node = desire_move[np.where(np.logical_and(desire_move[:, 1] == src_community, desire_move[:, 2] == dst_community))[0]]
if src_community != dst_community and move_node.size != 0:
move_node = move_node[np.flip(np.argsort(move_node[:, 3]))]
relocation[(src_community, dst_community)] = move_node
if move_node.shape[0] == 1:
utility_func[(src_community, dst_community)] = np.array([[0, move_node[0, 3]]])
else:
cum_sum = np.cumsum(move_node[:, 3])
utility_func_temp = np.zeros([move_node.shape[0] - 1, 2])
for k in range(move_node.shape[0] - 1):
utility_func_temp[k, 0], utility_func_temp[k, 1], _, _, _ = linregress([k, k+1], [cum_sum[k], cum_sum[k+1]])
utility_func[(src_community, dst_community)] = utility_func_temp
## Step 3: solve linear programming problem
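            # x[i, j]: number of nodes moved from community i to j; z[i, j]: its piecewise-linear utility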
x = cp.Variable([self.num_communities, self.num_communities])
z = cp.Variable([self.num_communities, self.num_communities])
objective = cp.Maximize(cp.sum(z))
constraints = []
for src_community in range(self.num_communities):
const = 0
for dst_community in range(self.num_communities):
if (src_community, dst_community) in relocation:
if src_community == dst_community:
constraints.append(x[src_community, dst_community] == 0)
constraints.append(z[src_community, dst_community] == 0)
else:
## Constraint 2 of Theorem 2
constraints.append(x[src_community, dst_community] >= 0)
constraints.append(x[src_community, dst_community] <= relocation[(src_community, dst_community)].shape[0])
## Constraint 1 of Theorem 2
if (dst_community, src_community) in relocation:
const += x[src_community, dst_community] - x[dst_community, src_community]
## Constraint 3 of Theorem 2
for utility_func_value in utility_func[(src_community, dst_community)]:
constraints.append(- utility_func_value[0] * x[src_community, dst_community] + z[src_community, dst_community] <= utility_func_value[1])
else:
constraints.append(x[src_community, dst_community] == 0)
constraints.append(z[src_community, dst_community] == 0)
## Constraint 1 of Theorem 2
constraints.append(len(self.communities[src_community]) + const <= self.node_threshold)
problem = cp.Problem(objective, constraints)
problem.solve()
## Step 4: parse linear programming problem results
if problem.status == 'optimal':
x_value = np.floor(np.abs(x.value)).astype(np.int64)
for src_community in range(self.num_communities):
for dst_community in range(self.num_communities):
if (src_community, dst_community) in relocation and x_value[src_community, dst_community] != 0:
# if (src_community, dst_community) in relocation:
relocation_temp = relocation[(src_community, dst_community)][:, 0].astype(np.int64)
move_node = relocation_temp[:x_value[src_community, dst_community] - 1]
if isinstance(move_node, np.int64):
self.communities[src_community].remove(move_node)
self.communities[dst_community].add(move_node)
self.node_community[move_node] = dst_community
else:
# move_node = set(move_node)
self.communities[src_community].difference_update(move_node)
self.communities[dst_community].update(move_node)
for node in move_node:
self.node_community[node] = dst_community
else:
self.logger.info("No optimal solution, break!")
break
## Check the number of moved nodes
delta = self._lpa_delta(communities, self.communities)
lpa_deltas.append(delta)
self.logger.info("%d" % delta)
communities = copy.deepcopy(self.communities)
if delta <= self.terminate_delta:
break
return self.communities, lpa_deltas
def _determine_desire_move(self):
desire_move = []
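        # each entry: [node id, current community, candidate community, gain in neighbour count]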
for i in range(self.num_nodes):
# neighbor_community = self.node_community[np.nonzero(self.adj[i])[0]] # for non-bool adj
neighbor_community = self.node_community[self.adj[i]] # for bool adj
unique_community, unique_count = np.unique(neighbor_community, return_counts=True)
src_relocation = unique_count[np.where(unique_community == self.node_community[i])[0]]
for community in unique_community:
if community != self.node_community[i]:
dst_relocation = unique_count[np.where(unique_community == community)[0]]
if dst_relocation - src_relocation >= 0:
desire_move_temp = np.zeros(4)
desire_move_temp[0] = i
desire_move_temp[1] = self.node_community[i]
desire_move_temp[2] = community
desire_move_temp[3] = dst_relocation - src_relocation
desire_move.append(desire_move_temp)
return np.stack(desire_move)
def _lpa_delta(self, lpa_pre, lpa_cur):
delta = 0.0
for i in range(len(lpa_cur)):
delta += len((lpa_cur[i] | lpa_pre[i]) - (lpa_cur[i] & lpa_pre[i]))
return delta
if __name__ == '__main__':
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
adj = np.array([[0, 1, 1],
[1, 0, 1],
[1, 1, 0]],
                   dtype=bool)
num_communities = 2
node_threshold = 3
terminate_delta = 1
lpa = ConstrainedLPABase(adj, num_communities, node_threshold, terminate_delta)
lpa.initialization()
lpa.community_detection()
| 8,700 | 45.77957 | 164 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/partition_random.py | import numpy as np
from lib_graph_partition.partition import Partition
class PartitionRandom(Partition):
def __init__(self, args, graph):
super(PartitionRandom, self).__init__(args, graph)
def partition(self):
graph_nodes = np.array(self.graph.nodes)
np.random.shuffle(graph_nodes)
train_shard_indices = np.array_split(graph_nodes, self.args['num_shards'])
return dict(zip(range(self.num_shards), train_shard_indices))
| 472 | 28.5625 | 82 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/__init__.py | 0 | 0 | 0 | py |
|
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/partition.py | import numpy as np
class Partition:
def __init__(self, args, graph, dataset=None):
self.args = args
self.graph = graph
self.dataset = dataset
self.partition_method = self.args['partition_method']
self.num_shards = self.args['num_shards']
self.dataset_name = self.args['dataset_name']
def idx2id(self, idx_dict, node_list):
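        # map per-community index arrays back to the original node ids stored in node_list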
ret_dict = {}
for com, idx in idx_dict.items():
ret_dict[com] = node_list[list(idx)]
return ret_dict
def id2idx(self, id_dict, node_list):
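        # map node ids back to their positions in node_list (assumes node_list is sorted, as required by searchsorted)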
ret_dict = {}
for com, id in id_dict.items():
ret_dict[com] = np.searchsorted(node_list, id)
return ret_dict
| 738 | 26.37037 | 61 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_graph_partition/hungarian_1.py | '''
reference: https://www.topcoder.com/community/competitive-programming/tutorials/assignment-problem-and-hungarian-algorithm/
'''
import numpy as np
#max weight assignment
class KMMatcher:
## weights : nxm weight matrix (numpy , float), n <= m
def __init__(self, weights):
weights = np.array(weights).astype(np.float32)
self.weights = weights
self.n, self.m = weights.shape
assert self.n <= self.m
# init label
self.label_x = np.max(weights, axis=1)
self.label_y = np.zeros((self.m, ), dtype=np.float32)
self.max_match = 0
        self.xy = -np.ones((self.n,), dtype=int)
        self.yx = -np.ones((self.m,), dtype=int)
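        # xy[x]: vertex matched to x (or -1 if unmatched); yx[y]: vertex matched to y (or -1 if unmatched)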
def do_augment(self, x, y):
self.max_match += 1
while x != -2:
self.yx[y] = x
ty = self.xy[x]
self.xy[x] = y
x, y = self.prev[x], ty
def find_augment_path(self):
        self.S = np.zeros((self.n,), bool)
        self.T = np.zeros((self.m,), bool)
        self.slack = np.zeros((self.m,), dtype=np.float32)
        self.slackyx = -np.ones((self.m,), dtype=int)  # l[slackyx[y]] + l[y] - w[slackx[y], y] == slack[y]
        self.prev = -np.ones((self.n,), int)
queue, st = [], 0
root = -1
for x in range(self.n):
if self.xy[x] == -1:
                queue.append(x)
root = x
self.prev[x] = -2
self.S[x] = True
break
self.slack = self.label_y + self.label_x[root] - self.weights[root]
self.slackyx[:] = root
while True:
while st < len(queue):
                x = queue[st]; st += 1
is_in_graph = np.isclose(self.weights[x], self.label_x[x] + self.label_y)
nonzero_inds = np.nonzero(np.logical_and(is_in_graph, np.logical_not(self.T)))[0]
for y in nonzero_inds:
if self.yx[y] == -1:
return x, y
self.T[y] = True
queue.append(self.yx[y])
self.add_to_tree(self.yx[y], x)
self.update_labels()
queue, st = [], 0
is_in_graph = np.isclose(self.slack, 0)
nonzero_inds = np.nonzero(np.logical_and(is_in_graph, np.logical_not(self.T)))[0]
for y in nonzero_inds:
x = self.slackyx[y]
if self.yx[y] == -1:
return x, y
self.T[y] = True
if not self.S[self.yx[y]]:
queue.append(x)
self.add_to_tree(self.yx[y], x)
def solve(self, verbose = False):
while self.max_match < self.n:
x, y = self.find_augment_path()
self.do_augment(x, y)
sum = 0.
for x in range(self.n):
if verbose:
print('match {} to {}, weight {:.4f}'.format(x, self.xy[x], self.weights[x, self.xy[x]]))
sum += self.weights[x, self.xy[x]]
self.best = sum
if verbose:
print('ans: {:.4f}'.format(sum))
return self.xy, sum
def add_to_tree(self, x, prevx):
self.S[x] = True
self.prev[x] = prevx
better_slack_idx = self.label_x[x] + self.label_y - self.weights[x] < self.slack
self.slack[better_slack_idx] = self.label_x[x] + self.label_y[better_slack_idx] - self.weights[x, better_slack_idx]
self.slackyx[better_slack_idx] = x
def update_labels(self):
delta = self.slack[np.logical_not(self.T)].min()
self.label_x[self.S] -= delta
self.label_y[self.T] += delta
self.slack[np.logical_not(self.T)] -= delta
if __name__ == '__main__':
matcher = KMMatcher([
[2., 3., 0., 3.],
[0., 4., 4., 0.],
[5., 6., 0., 0.],
[0., 0., 7., 0.]
])
best = matcher.solve(verbose=True)
print(best)
| 3,953 | 31.146341 | 123 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gnn_base.py | import logging
import pickle
import torch
class GNNBase:
def __init__(self):
self.logger = logging.getLogger('gnn')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = torch.device('cpu')
self.model = None
self.embedding_dim = 0
self.data = None
self.subgraph_loader = None
def save_model(self, save_path):
self.logger.info('saving model')
torch.save(self.model.state_dict(), save_path)
def load_model(self, save_path):
self.logger.info('loading model')
device = torch.device('cpu')
self.model.load_state_dict(torch.load(save_path, map_location=device))
def save_paras(self, save_path):
self.logger.info('saving paras')
self.paras = {
'embedding_dim': self.embedding_dim
}
pickle.dump(self.paras, open(save_path, 'wb'))
def load_paras(self, save_path):
self.logger.info('loading paras')
return pickle.load(open(save_path, 'rb'))
def count_parameters(self):
return sum(p.numel() for p in self.model.parameters() if p.requires_grad)
def posterior(self):
self.model.eval()
self.model = self.model.to(self.device)
self.data = self.data.to(self.device)
posteriors = self.model(self.data)
for _, mask in self.data('test_mask'):
posteriors = posteriors[mask]
return posteriors.detach()
| 1,482 | 28.078431 | 82 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/node_classifier.py | import logging
import os
import torch
from sklearn.model_selection import train_test_split
torch.cuda.empty_cache()
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.data import NeighborSampler
from torch_geometric.nn.conv.gcn_conv import gcn_norm
import numpy as np
import config
from lib_gnn_model.gat.gat_net_batch import GATNet
from lib_gnn_model.gin.gin_net_batch import GINNet
from lib_gnn_model.gcn.gcn_net_batch import GCNNet
from lib_gnn_model.graphsage.graphsage_net import SageNet
from lib_gnn_model.gnn_base import GNNBase
from parameter_parser import parameter_parser
from lib_utils import utils
class NodeClassifier(GNNBase):
def __init__(self, num_feats, num_classes, args, data=None):
super(NodeClassifier, self).__init__()
self.args = args
self.logger = logging.getLogger('node_classifier')
self.target_model = args['target_model']
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = 'cpu'
self.model = self.determine_model(num_feats, num_classes).to(self.device)
self.data = data
def determine_model(self, num_feats, num_classes):
self.logger.info('target model: %s' % (self.args['target_model'],))
if self.target_model == 'SAGE':
self.lr, self.decay = 0.01, 0.001
return SageNet(num_feats, 256, num_classes)
elif self.target_model == 'GAT':
self.lr, self.decay = 0.01, 0.001
return GATNet(num_feats, num_classes)
elif self.target_model == 'GCN':
self.lr, self.decay = 0.05, 0.0001
return GCNNet(num_feats, num_classes)
elif self.target_model == 'GIN':
self.lr, self.decay = 0.01, 0.0001
return GINNet(num_feats, num_classes)
else:
raise Exception('unsupported target model')
def train_model(self):
self.logger.info("training model")
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self.data.y = self.data.y.squeeze().to(self.device)
self._gen_train_loader()
optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr, weight_decay=self.decay)
for epoch in range(self.args['num_epochs']):
self.logger.info('epoch %s' % (epoch,))
for batch_size, n_id, adjs in self.train_loader:
# self.logger.info("batch size: %s"%(batch_size))
# `adjs` holds a list of `(edge_index, e_id, size)` tuples.
adjs = [adj.to(self.device) for adj in adjs]
test_node = np.nonzero(self.data.test_mask.cpu().numpy())[0]
intersect = np.intersect1d(test_node, n_id.numpy())
optimizer.zero_grad()
if self.target_model == 'GCN':
out = self.model(self.data.x[n_id], adjs, self.edge_weight)
else:
out = self.model(self.data.x[n_id], adjs)
loss = F.nll_loss(out, self.data.y[n_id[:batch_size]])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info(f'Train: {train_acc:.4f}, Test: {test_acc:.4f}')
@torch.no_grad()
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_test_loader()
if self.target_model == 'GCN':
out = self.model.inference(self.data.x, self.test_loader, self.edge_weight, self.device)
else:
out = self.model.inference(self.data.x, self.test_loader, self.device)
y_true = self.data.y.cpu().unsqueeze(-1)
y_pred = out.argmax(dim=-1, keepdim=True)
results = []
for mask in [self.data.train_mask, self.data.test_mask]:
results += [int(y_pred[mask].eq(y_true[mask]).sum()) / int(mask.sum())]
return results
def posterior(self):
self.logger.debug("generating posteriors")
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self.model.eval()
self._gen_test_loader()
if self.target_model == 'GCN':
posteriors = self.model.inference(self.data.x, self.test_loader, self.edge_weight, self.device)
else:
posteriors = self.model.inference(self.data.x, self.test_loader, self.device)
for _, mask in self.data('test_mask'):
posteriors = F.log_softmax(posteriors[mask], dim=-1)
return posteriors.detach()
def generate_embeddings(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_test_loader()
if self.target_model == 'GCN':
logits = self.model.inference(self.data.x, self.test_loader, self.edge_weight, self.device)
else:
logits = self.model.inference(self.data.x, self.test_loader, self.device)
return logits
def _gen_train_loader(self):
self.logger.info("generate train loader")
train_indices = np.nonzero(self.data.train_mask.cpu().numpy())[0]
edge_index = utils.filter_edge_index(self.data.edge_index, train_indices, reindex=False)
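        # NeighborSampler cannot handle an empty edge_index, so fall back to a dummy edge below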
if edge_index.shape[1] == 0:
edge_index = torch.tensor([[1, 2], [2, 1]])
self.train_loader = NeighborSampler(
edge_index, node_idx=self.data.train_mask,
sizes=[5, 5], num_nodes=self.data.num_nodes,
batch_size=self.args['batch_size'], shuffle=True,
num_workers=0)
if self.target_model == 'GCN':
_, self.edge_weight = gcn_norm(self.data.edge_index, edge_weight=None, num_nodes=self.data.x.shape[0],
add_self_loops=False)
self.logger.info("generate train loader finish")
def _gen_test_loader(self):
test_indices = np.nonzero(self.data.train_mask.cpu().numpy())[0]
if not self.args['use_test_neighbors']:
edge_index = utils.filter_edge_index(self.data.edge_index, test_indices, reindex=False)
else:
edge_index = self.data.edge_index
if edge_index.shape[1] == 0:
edge_index = torch.tensor([[1, 3], [3, 1]])
self.test_loader = NeighborSampler(
edge_index, node_idx=None,
sizes=[-1], num_nodes=self.data.num_nodes,
# sizes=[5], num_nodes=self.data.num_nodes,
batch_size=self.args['test_batch_size'], shuffle=False,
num_workers=0)
if self.target_model == 'GCN':
_, self.edge_weight = gcn_norm(self.data.edge_index, edge_weight=None, num_nodes=self.data.x.shape[0],
add_self_loops=False)
if __name__ == '__main__':
os.chdir('../')
args = parameter_parser()
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
train_indices, test_indices = train_test_split(np.arange((data.num_nodes)), test_size=0.2, random_state=100)
data.train_mask, data.test_mask = torch.zeros(data.num_nodes, dtype=torch.bool), torch.zeros(data.num_nodes,
dtype=torch.bool)
data.train_mask[train_indices] = True
data.test_mask[test_indices] = True
graphsage = NodeClassifier(dataset.num_features, dataset.num_classes, args, data)
graphsage.train_model()
| 7,966 | 38.636816 | 114 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/__init__.py | 0 | 0 | 0 | py |
|
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gin/gin.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid, Reddit
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gin.gin_net import GINNet
import config
class GIN(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(GIN, self).__init__()
self.logger = logging.getLogger('gin')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = GINNet(num_feats, num_classes).to(self.device)
self.data = data
def train_model(self, num_epochs=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
for epoch in range(num_epochs):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data)[self.data.train_mask]
loss = F.nll_loss(output, self.data.y[self.data.train_mask])
# loss = F.nll_loss(output, self.data.y.squeeze(1)[self.data.train_mask])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'citeseer'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gin = GIN(dataset.num_features, dataset.num_classes, data)
gin.train_model()
| 2,338 | 31.943662 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gin/gin_net.py | import torch
import torch.nn.functional as F
from torch.nn import Sequential, Linear, ReLU
from torch_geometric.nn import GINConv
class GINNet(torch.nn.Module):
def __init__(self, num_feats, num_classes):
super(GINNet, self).__init__()
dim = 32
nn1 = Sequential(Linear(num_feats, dim), ReLU(), Linear(dim, dim))
self.conv1 = GINConv(nn1)
self.bn1 = torch.nn.BatchNorm1d(dim)
nn2 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv2 = GINConv(nn2)
self.bn2 = torch.nn.BatchNorm1d(dim)
nn3 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv3 = GINConv(nn3)
self.bn3 = torch.nn.BatchNorm1d(dim)
nn4 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv4 = GINConv(nn4)
self.bn4 = torch.nn.BatchNorm1d(dim)
nn5 = Sequential(Linear(dim, dim), ReLU(), Linear(dim, dim))
self.conv5 = GINConv(nn5)
self.bn5 = torch.nn.BatchNorm1d(dim)
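        # NOTE: conv3-conv5 and bn3-bn5 are defined but not used in forward(); only the first two GIN layers are applied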
self.fc1 = Linear(dim, dim)
self.fc2 = Linear(dim, num_classes)
def forward(self, data, batch=None):
x = F.relu(self.conv1(data.x, data.edge_index))
x = self.bn1(x)
x = F.relu(self.conv2(x, data.edge_index))
x = self.bn2(x)
x = F.relu(self.fc1(x))
x = F.dropout(x, p=0.5, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
| 1,558 | 30.18 | 74 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gat/gat_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import GATConv
class GATNet(torch.nn.Module):
def __init__(self, num_feats, num_classes, dropout=0.6):
super(GATNet, self).__init__()
self.dropout = dropout
self.conv1 = GATConv(num_feats, 8, heads=8, dropout=self.dropout, add_self_loops=False)
# On the Pubmed dataset, use heads=8 in conv2.
self.conv2 = GATConv(8 * 8, num_classes, heads=1, concat=False, dropout=self.dropout, add_self_loops=False)
# self.conv2 = GATConv(8 * 8, num_classes, heads=8, concat=False, dropout=self.dropout, add_self_loops=False)
self.reset_parameters()
def forward(self, data):
x = F.dropout(data.x, p=self.dropout, training=self.training)
x = F.elu(self.conv1(x, data.edge_index))
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.conv2(x, data.edge_index)
return F.log_softmax(x, dim=1)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
| 1,074 | 36.068966 | 117 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gat/gat.py | import logging
import os
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
import config
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gat.gat_net import GATNet
class GAT(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(GAT, self).__init__()
self.logger = logging.getLogger('gat')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = GATNet(num_feats, num_classes)
self.data = data
def train_model(self, num_epoch=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.005, weight_decay=0.0001)
for epoch in range(num_epoch):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data)[self.data.train_mask]
loss = F.nll_loss(output, self.data.y[self.data.train_mask])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
# self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gat = GAT(dataset.num_features, dataset.num_classes, data)
gat.train_model()
# gat.evaluate_model()
| 2,273 | 31.028169 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/graphsage/graphsage.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from torch_geometric.data import NeighborSampler
from lib_gnn_model.graphsage.graphsage_net import SageNet
from lib_gnn_model.gnn_base import GNNBase
import config
class SAGE(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(SAGE, self).__init__()
self.logger = logging.getLogger('graphsage')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# self.device = torch.device('cpu')
self.model = SageNet(num_feats, 256, num_classes).to(self.device)
self.data = data
def train_model(self, num_epochs=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self.data.y = self.data.y.squeeze().to(self.device)
self._gen_train_loader()
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01, weight_decay=0.001)
for epoch in range(num_epochs):
self.logger.info('epoch %s' % (epoch,))
for batch_size, n_id, adjs in self.train_loader:
# `adjs` holds a list of `(edge_index, e_id, size)` tuples.
adjs = [adj.to(self.device) for adj in adjs]
optimizer.zero_grad()
out = self.model(self.data.x[n_id], adjs)
loss = F.nll_loss(out, self.data.y[n_id[:batch_size]])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info(f'Train: {train_acc:.4f}, Test: {test_acc:.4f}')
@torch.no_grad()
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_subgraph_loader()
out = self.model.inference(self.data.x, self.subgraph_loader, self.device)
y_true = self.data.y.cpu().unsqueeze(-1)
y_pred = out.argmax(dim=-1, keepdim=True)
results = []
for mask in [self.data.train_mask, self.data.test_mask]:
results += [int(y_pred[mask].eq(y_true[mask]).sum()) / int(mask.sum())]
return results
def posterior(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_subgraph_loader()
posteriors = self.model.inference(self.data.x, self.subgraph_loader, self.device)
for _, mask in self.data('test_mask'):
posteriors = F.log_softmax(posteriors[mask], dim=-1)
return posteriors.detach()
def generate_embeddings(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
self._gen_subgraph_loader()
logits = self.model.inference(self.data.x, self.subgraph_loader, self.device)
return logits
def _gen_train_loader(self):
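        # guard: NeighborSampler fails on an empty edge_index, so insert a dummy edge first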
if self.data.edge_index.shape[1] == 0:
self.data.edge_index = torch.tensor([[1, 2], [2, 1]])
self.train_loader = NeighborSampler(self.data.edge_index, node_idx=self.data.train_mask,
# sizes=[25, 10], batch_size=128, shuffle=True,
# sizes=[25, 10], num_nodes=self.data.num_nodes,
sizes=[10, 10], num_nodes=self.data.num_nodes,
# sizes=[5, 5], num_nodes=self.data.num_nodes,
# batch_size=128, shuffle=True,
batch_size=64, shuffle=True,
num_workers=0)
def _gen_subgraph_loader(self):
self.subgraph_loader = NeighborSampler(self.data.edge_index, node_idx=None,
# sizes=[-1], num_nodes=self.data.num_nodes,
sizes=[10], num_nodes=self.data.num_nodes,
# batch_size=128, shuffle=False,
batch_size=64, shuffle=False,
num_workers=0)
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
graphsage = SAGE(dataset.num_features, dataset.num_classes, data)
graphsage.train_model()
| 4,883 | 39.363636 | 96 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/graphsage/graphsage_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import SAGEConv
class SageNet(torch.nn.Module):
def __init__(self, in_channels, hidden_channels, out_channels):
super(SageNet, self).__init__()
self.num_layers = 2
self.convs = torch.nn.ModuleList()
self.convs.append(SAGEConv(in_channels, hidden_channels))
self.convs.append(SAGEConv(hidden_channels, out_channels))
def forward(self, x, adjs):
# `train_loader` computes the k-hop neighborhood of a batch of nodes,
# and returns, for each layer, a bipartite graph object, holding the
# bipartite edges `edge_index`, the index `e_id` of the original edges,
# and the size/shape `size` of the bipartite graph.
# Target nodes are also included in the source nodes so that one can
# easily apply skip-connections or add self-loops.
for i, (edge_index, _, size) in enumerate(adjs):
x_target = x[:size[1]] # Target nodes are always placed first.
x = self.convs[i]((x, x_target), edge_index)
if i != self.num_layers - 1:
x = F.relu(x)
x = F.dropout(x, p=0.5, training=self.training)
return F.log_softmax(x, dim=-1)
def inference(self, x_all, subgraph_loader, device):
# Compute representations of nodes layer by layer, using *all*
# available edges. This leads to faster computation in contrast to
# immediately computing the final representations of each batch.
for i in range(self.num_layers):
xs = []
for batch_size, n_id, adj in subgraph_loader:
edge_index, _, size = adj.to(device)
x = x_all[n_id].to(device)
x_target = x[:size[1]]
x = self.convs[i]((x, x_target), edge_index)
if i != self.num_layers - 1:
x = F.relu(x)
xs.append(x.cpu())
x_all = torch.cat(xs, dim=0)
return x_all
def reset_parameters(self):
for i in range(self.num_layers):
self.convs[i].reset_parameters()
| 2,154 | 37.482143 | 79 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gcn/gcn_net.py | import torch
import torch.nn.functional as F
from torch_geometric.nn import GCNConv
class GCNNet(torch.nn.Module):
def __init__(self, num_feats, num_classes):
super(GCNNet, self).__init__()
self.conv1 = GCNConv(num_feats, 16, cached=True, add_self_loops=False)
self.conv2 = GCNConv(16, num_classes, cached=True, add_self_loops=False)
def forward(self, data):
x, edge_index, edge_weight = data.x, data.edge_index, data.edge_attr
x = F.relu(self.conv1(x, edge_index, edge_weight))
x = F.dropout(x, training=self.training)
x = self.conv2(x, edge_index, edge_weight)
return F.log_softmax(x, dim=-1)
def reset_parameters(self):
self.conv1.reset_parameters()
self.conv2.reset_parameters()
| 781 | 31.583333 | 80 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/gcn/gcn.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.gcn.gcn_net import GCNNet
import config
class GCN(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(GCN, self).__init__()
self.logger = logging.getLogger('gcn')
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = GCNNet(num_feats, num_classes)
self.data = data
def train_model(self, num_epoch=100):
self.model.train()
self.model.reset_parameters()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
for epoch in range(num_epoch):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data)[self.data.train_mask]
loss = F.nll_loss(output, self.data.y[self.data.train_mask])
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'cora'
dataset = Planetoid(config.RAW_DATA_PATH, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gcn = GCN(dataset.num_features, dataset.num_classes, data)
gcn.train_model() | 2,221 | 31.202899 | 92 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/mlp/mlp.py | import os
import logging
import torch
import torch.nn.functional as F
import torch_geometric.transforms as T
from torch_geometric.datasets import Planetoid
from lib_gnn_model.gnn_base import GNNBase
from lib_gnn_model.mlp.mlpnet import MLPNet
import config
class MLP(GNNBase):
def __init__(self, num_feats, num_classes, data=None):
super(MLP, self).__init__()
self.logger = logging.getLogger(__name__)
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
self.model = MLPNet(num_feats, num_classes)
self.data = data
def train_model(self, num_epoch=100):
self.model.train()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
optimizer = torch.optim.Adam(self.model.parameters(), lr=0.01)
for epoch in range(num_epoch):
self.logger.info('epoch %s' % (epoch,))
optimizer.zero_grad()
output = self.model(self.data.x)[self.data.train_mask]
# loss = F.nll_loss(output, self.data.y[self.data.train_mask])
            loss = F.cross_entropy(output, self.data.y[self.data.train_mask].squeeze())
loss.backward()
optimizer.step()
train_acc, test_acc = self.evaluate_model()
self.logger.info('train acc: %s, test acc: %s' % (train_acc, test_acc))
def evaluate_model(self):
self.model.eval()
self.model, self.data = self.model.to(self.device), self.data.to(self.device)
logits, accs = self.model(self.data.x), []
for _, mask in self.data('train_mask', 'test_mask'):
pred = logits[mask].max(1)[1]
acc = pred.eq(self.data.y[mask]).sum().item() / mask.sum().item()
accs.append(acc)
return accs
def posterior(self):
self.model.eval()
posteriors = self.model(self.data.x)
for _, mask in self.data('test_mask'):
posteriors = posteriors[mask]
return posteriors
if __name__ == '__main__':
os.chdir('../../')
output_file = None
logging.basicConfig(filename=output_file,
format='%(levelname)s:%(asctime)s: - %(name)s - : %(message)s',
level=logging.DEBUG)
dataset_name = 'Cora'
dataset = Planetoid(config.RAW_DATA_PATH + dataset_name, dataset_name, transform=T.NormalizeFeatures())
data = dataset[0]
gcn = MLP(dataset.num_features, dataset.num_classes, data)
gcn.train_model() | 2,518 | 31.294872 | 107 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/mlp/__init__.py | 0 | 0 | 0 | py |
|
Graph-Unlearning | Graph-Unlearning-main/lib_gnn_model/mlp/mlpnet.py | from torch import nn
import torch.nn.functional as F
class MLPNet(nn.Module):
def __init__(self, input_size, num_classes):
super(MLPNet, self).__init__()
self.xent = nn.CrossEntropyLoss()
self.layers = nn.Sequential(
nn.Linear(input_size, 250),
nn.Linear(250, 100),
nn.Linear(100, num_classes)
)
def forward(self, x):
x = x.view(x.size(0), -1)
x = self.layers(x)
return F.softmax(x, dim=1)
def loss(self, nodes, labels):
scores = self.forward(nodes)
return self.xent(scores, labels.squeeze())
def reset_parameters(self):
return 0
| 668 | 23.777778 | 50 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_graph_partition.py | import logging
import time
import torch
from sklearn.model_selection import train_test_split
import numpy as np
from torch_geometric.data import Data
import torch_geometric as tg
import networkx as nx
from exp.exp import Exp
from lib_utils.utils import connected_component_subgraphs
from lib_graph_partition.graph_partition import GraphPartition
from lib_utils import utils
class ExpGraphPartition(Exp):
def __init__(self, args):
super(ExpGraphPartition, self).__init__(args)
self.logger = logging.getLogger('exp_graph_partition')
self.load_data()
self.train_test_split()
self.gen_train_graph()
self.graph_partition()
self.generate_shard_data()
def load_data(self):
self.data = self.data_store.load_raw_data()
def train_test_split(self):
if self.args['is_split']:
self.logger.info('splitting train/test data')
self.train_indices, self.test_indices = train_test_split(np.arange((self.data.num_nodes)), test_size=self.args['test_ratio'], random_state=100)
self.data_store.save_train_test_split(self.train_indices, self.test_indices)
self.data.train_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.train_indices))
self.data.test_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.test_indices))
else:
self.train_indices, self.test_indices = self.data_store.load_train_test_split()
self.data.train_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.train_indices))
self.data.test_mask = torch.from_numpy(np.isin(np.arange(self.data.num_nodes), self.test_indices))
def gen_train_graph(self):
# delete ratio of edges and update the train graph
if self.args['ratio_deleted_edges'] != 0:
self.logger.debug("Before edge deletion. train data #.Nodes: %f, #.Edges: %f" % (
self.data.num_nodes, self.data.num_edges))
# self._ratio_delete_edges()
self.data.edge_index = self._ratio_delete_edges(self.data.edge_index)
# decouple train test edges.
edge_index = self.data.edge_index.numpy()
test_edge_indices = np.logical_or(np.isin(edge_index[0], self.test_indices),
np.isin(edge_index[1], self.test_indices))
train_edge_indices = np.logical_not(test_edge_indices)
edge_index_train = edge_index[:, train_edge_indices]
self.train_graph = nx.Graph()
self.train_graph.add_nodes_from(self.train_indices)
# use largest connected graph as train graph
if self.args['is_prune']:
self._prune_train_set()
# reconstruct a networkx train graph
for u, v in np.transpose(edge_index_train):
self.train_graph.add_edge(u, v)
self.logger.debug("After edge deletion. train graph #.Nodes: %f, #.Edges: %f" % (
self.train_graph.number_of_nodes(), self.train_graph.number_of_edges()))
self.logger.debug("After edge deletion. train data #.Nodes: %f, #.Edges: %f" % (
self.data.num_nodes, self.data.num_edges))
self.data_store.save_train_data(self.data)
self.data_store.save_train_graph(self.train_graph)
def graph_partition(self):
if self.args['is_partition']:
self.logger.info('graph partitioning')
start_time = time.time()
partition = GraphPartition(self.args, self.train_graph, self.data)
self.community_to_node = partition.graph_partition()
partition_time = time.time() - start_time
self.logger.info("Partition cost %s seconds." % partition_time)
self.data_store.save_community_data(self.community_to_node)
else:
self.community_to_node = self.data_store.load_community_data()
def generate_shard_data(self):
self.logger.info('generating shard data')
self.shard_data = {}
for shard in range(self.args['num_shards']):
train_shard_indices = list(self.community_to_node[shard])
shard_indices = np.union1d(train_shard_indices, self.test_indices)
x = self.data.x[shard_indices]
y = self.data.y[shard_indices]
edge_index = utils.filter_edge_index_1(self.data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, self.test_indices))
self.shard_data[shard] = data
self.data_store.save_shard_data(self.shard_data)
def _prune_train_set(self):
        # extract the maximum connected component
self.logger.debug("Before Prune... #. of Nodes: %f, #. of Edges: %f" % (
self.train_graph.number_of_nodes(), self.train_graph.number_of_edges()))
self.train_graph = max(connected_component_subgraphs(self.train_graph), key=len)
self.logger.debug("After Prune... #. of Nodes: %f, #. of Edges: %f" % (
self.train_graph.number_of_nodes(), self.train_graph.number_of_edges()))
# self.train_indices = np.array(self.train_graph.nodes)
def _ratio_delete_edges(self, edge_index):
edge_index = edge_index.numpy()
unique_indices = np.where(edge_index[0] < edge_index[1])[0]
unique_indices_not = np.where(edge_index[0] > edge_index[1])[0]
remain_indices = np.random.choice(unique_indices,
int(unique_indices.shape[0] * (1.0 - self.args['ratio_deleted_edges'])),
replace=False)
remain_encode = edge_index[0, remain_indices] * edge_index.shape[1] * 2 + edge_index[1, remain_indices]
unique_encode_not = edge_index[1, unique_indices_not] * edge_index.shape[1] * 2 + edge_index[0, unique_indices_not]
sort_indices = np.argsort(unique_encode_not)
remain_indices_not = unique_indices_not[sort_indices[np.searchsorted(unique_encode_not, remain_encode, sorter=sort_indices)]]
remain_indices = np.union1d(remain_indices, remain_indices_not)
# self.data.edge_index = torch.from_numpy(edge_index[:, remain_indices])
return torch.from_numpy(edge_index[:, remain_indices])
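# --- Illustrative sketch (added for clarity; not part of the original file) ---
# _ratio_delete_edges above keeps a random subset of the (u < v) edge copies and
# then recovers their reversed (v, u) twins by encoding each pair as the integer
# u * (2 * num_edges) + v and looking the codes up with a sorted search, so both
# directions of every surviving undirected edge are retained. A tiny NumPy-only
# walk-through of that encoding trick with toy edges:
def _demo_edge_pair_encoding():
    import numpy as np
    edge_index = np.array([[0, 1, 2, 1, 2, 3],
                           [1, 2, 3, 0, 1, 2]])        # 3 undirected edges, stored twice
    base = edge_index.shape[1] * 2
    forward = np.where(edge_index[0] < edge_index[1])[0]
    keep = forward[:2]                                 # pretend (0,1) and (1,2) survived sampling
    keep_code = edge_index[0, keep] * base + edge_index[1, keep]
    backward = np.where(edge_index[0] > edge_index[1])[0]
    back_code = edge_index[1, backward] * base + edge_index[0, backward]
    order = np.argsort(back_code)
    keep_back = backward[order[np.searchsorted(back_code, keep_code, sorter=order)]]
    return np.union1d(keep, keep_back)                 # -> array([0, 1, 3, 4]): both directions kept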
| 6,423 | 44.560284 | 155 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_attack_unlearning.py | import logging
import time
from collections import defaultdict
import numpy as np
import torch
import torch_geometric as tg
from torch_geometric.data import Data
from scipy.spatial import distance
import config
from exp.exp import Exp
from lib_graph_partition.graph_partition import GraphPartition
from lib_gnn_model.node_classifier import NodeClassifier
from lib_aggregator.aggregator import Aggregator
from lib_utils import utils
class ExpAttackUnlearning(Exp):
def __init__(self, args):
super(ExpAttackUnlearning, self).__init__(args)
self.logger = logging.getLogger('exp_attack_unlearning')
# 1. respond to the unlearning requests
self.load_preprocessed_data()
# self.graph_unlearning_request_respond()
if self.args['repartition']:
with open(config.MODEL_PATH + self.args['dataset_name'] + '/' + self.args['target_model']+"_unlearned_indices") as file:
node_unlearning_indices = [line.rstrip() for line in file]
for unlearned_node in node_unlearning_indices:
self.graph_unlearning_request_respond(int(unlearned_node))
else:
self.graph_unlearning_request_respond()
# 2. evalute the attack performance
self.attack_graph_unlearning()
def load_preprocessed_data(self):
self.shard_data = self.data_store.load_shard_data()
self.raw_data = self.data_store.load_raw_data()
self.train_data = self.data_store.load_train_data()
self.train_graph = self.data_store.load_train_graph()
self.train_indices, self.test_indices = self.data_store.load_train_test_split()
self.community_to_node = self.data_store.load_community_data()
num_feats = self.train_data.num_features
num_classes = len(self.train_data.y.unique())
self.target_model = NodeClassifier(num_feats, num_classes, self.args)
def graph_unlearning_request_respond(self, node_unlearning_request=None):
# reindex the node ids
node_to_com = self.data_store.c2n_to_n2c(self.community_to_node)
train_indices_prune = list(node_to_com.keys())
if node_unlearning_request==None:
# generate node unlearning requests
node_unlearning_indices = np.random.choice(train_indices_prune, self.args['num_unlearned_nodes'])
else:
node_unlearning_indices = np.array([node_unlearning_request])
self.num_unlearned_edges =0
unlearning_indices = defaultdict(list)
for node in node_unlearning_indices:
unlearning_indices[node_to_com[node]].append(node)
# delete a list of revoked nodes from train_graph
self.train_graph.remove_nodes_from(node_unlearning_indices)
# delete the revoked nodes from train_data
# by building unlearned data from unlearned train_graph
self.train_data.train_mask = torch.from_numpy(np.isin(np.arange(self.train_data.num_nodes), self.train_indices))
self.train_data.test_mask = torch.from_numpy(np.isin(np.arange(self.train_data.num_nodes), np.append(self.test_indices, node_unlearning_indices)))
# delete the revoked nodes from shard_data
self.shard_data_after_unlearning = {}
self.affected_shard=[]
for shard in range(self.args["num_shards"]):
train_shard_indices = list(self.community_to_node[shard])
# node unlearning
train_shard_indices = np.setdiff1d(train_shard_indices, unlearning_indices[shard])
shard_indices = np.union1d(train_shard_indices, self.test_indices)
x = self.train_data.x[shard_indices]
y = self.train_data.y[shard_indices]
edge_index = utils.filter_edge_index_1(self.train_data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, self.test_indices))
self.shard_data_after_unlearning[shard] = data
self.num_unlearned_edges += self.shard_data[shard].num_edges - self.shard_data_after_unlearning[shard].num_edges
# find the affected shard model
if self.shard_data_after_unlearning[shard].num_nodes != self.shard_data[shard].num_nodes:
self.affected_shard.append(shard)
self.data_store.save_unlearned_data(self.train_graph, 'train_graph')
self.data_store.save_unlearned_data(self.train_data, 'train_data')
self.data_store.save_unlearned_data(self.shard_data_after_unlearning, 'shard_data')
        # retrain the corresponding shard model
if not self.args['repartition']:
for shard in self.affected_shard:
suffix = "unlearned_"+str(node_unlearning_indices[0])
self._train_shard_model(shard, suffix)
# (if re-partition, re-partition the remaining graph)
# re-train the shard model, save model and optimal weight score
if self.args['repartition']:
suffix="_repartition_unlearned_" + str(node_unlearning_indices[0])
self._repartition(suffix)
for shard in range(self.args["num_shards"]):
self._train_shard_model(shard, suffix)
def _repartition(self, suffix):
# load unlearned train_graph and train_data
train_graph = self.data_store.load_unlearned_data('train_graph')
train_data = self.data_store.load_unlearned_data('train_data')
# repartition
start_time = time.time()
partition = GraphPartition(self.args, train_graph, train_data)
community_to_node = partition.graph_partition()
partition_time = time.time() - start_time
self.logger.info("Partition cost %s seconds." % partition_time)
# save the new partition and shard
self.data_store.save_community_data(community_to_node, suffix)
self._generate_unlearned_repartitioned_shard_data(train_data, community_to_node, self.test_indices)
def _generate_unlearned_repartitioned_shard_data(self, train_data, community_to_node, test_indices):
self.logger.info('generating shard data')
shard_data = {}
for shard in range(self.args['num_shards']):
train_shard_indices = list(community_to_node[shard])
shard_indices = np.union1d(train_shard_indices, test_indices)
x = self.train_data.x[shard_indices]
y = self.train_data.y[shard_indices]
edge_index = utils.filter_edge_index_1(train_data, shard_indices)
data = Data(x=x, edge_index=torch.from_numpy(edge_index), y=y)
data.train_mask = torch.from_numpy(np.isin(shard_indices, train_shard_indices))
data.test_mask = torch.from_numpy(np.isin(shard_indices, test_indices))
shard_data[shard] = data
# self.data_store.save_unlearned_data(shard_data, 'shard_data_repartition')
return shard_data
def _train_shard_model(self, shard, suffix="unlearned"):
self.logger.info('training target models, shard %s' % shard)
# load shard data
self.target_model.data = self.shard_data_after_unlearning[shard]
# retrain shard model
self.target_model.train_model()
# replace shard model
device=torch.device("cpu")
self.target_model.device = device
self.data_store.save_target_model(0, self.target_model, shard, suffix)
# self.data_store.save_unlearned_target_model(0, self.target_model, shard, suffix)
def attack_graph_unlearning(self):
# load unlearned indices
with open(config.MODEL_PATH + self.args['dataset_name'] + "/" + self.args['target_model'] +"_unlearned_indices") as file:
unlearned_indices = [line.rstrip() for line in file]
# member sample query, label as 1
positive_posteriors = self._query_target_model(unlearned_indices, unlearned_indices)
# non-member sample query, label as 0
negative_posteriors = self._query_target_model(unlearned_indices, self.test_indices)
# evaluate attack performance, train multiple shadow models, or calculate posterior entropy, or directly calculate AUC.
self.evaluate_attack_performance(positive_posteriors, negative_posteriors)
def _query_target_model(self, unlearned_indices, test_indices):
# load unlearned data
train_data = self.data_store.load_unlearned_data('train_data')
# load optimal weight score
# optimal_weight=self.data_store.load_optimal_weight(0)
# calculate the final posterior, save as attack feature
self.logger.info('aggregating submodels')
posteriors_a, posteriors_b, posteriors_c =[],[],[]
for i in unlearned_indices:
community_to_node = self.data_store.load_community_data('')
shard_data = self._generate_unlearned_repartitioned_shard_data(train_data, community_to_node, int(i))
posteriors_a.append(self._generate_posteriors(shard_data, ''))
suffix="unlearned_" + str(i)
posteriors_b.append(self._generate_posteriors_unlearned(shard_data, suffix, i))
if self.args['repartition']:
suffix = "_repartition_unlearned_" + str(i)
community_to_node = self.data_store.load_community_data(suffix)
shard_data = self._generate_unlearned_repartitioned_shard_data(train_data, community_to_node, int(i))
suffix = "__repartition_unlearned_" + str(i)
posteriors_c.append(self._generate_posteriors(shard_data, suffix))
return posteriors_a, posteriors_b, posteriors_c
def _generate_posteriors_unlearned(self, shard_data, suffix, unlearned_indice):
import glob
model_path=glob.glob(config.MODEL_PATH+self.args['dataset_name']+"/*_1unlearned_"+str(unlearned_indice))
if not model_path:
self.logger.info("No corresponding unlearned shard model for node %s" % str(unlearned_indice))
return torch.tensor([0]*6)
else:
affected_shard = int(model_path[0].split('/')[-1].split('_')[-4])
posteriors = []
for shard in range(self.args['num_shards']):
if shard == affected_shard:
# load the retrained the shard model
self.data_store.load_target_model(0, self.target_model, shard, suffix)
else:
# self.target_model.model.reset_parameters()
# load unaffected shard model
self.data_store.load_target_model(0, self.target_model, shard, '')
self.device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
self.target_model.model = self.target_model.model.to(self.device)
self.target_model.data = shard_data[shard].to(self.device)
posteriors.append(self.target_model.posterior())
return torch.mean(torch.cat(posteriors, dim=0), dim=0)
def _generate_posteriors(self, shard_data, suffix):
posteriors = []
for shard in range(self.args['num_shards']):
# self.target_model.model.reset_parameters()
self.data_store.load_target_model(0, self.target_model, shard, suffix)
self.device = torch.device('cuda:3' if torch.cuda.is_available() else 'cpu')
self.target_model.model = self.target_model.model.to(self.device)
self.target_model.data = shard_data[shard].to(self.device)
posteriors.append(self.target_model.posterior())
return torch.mean(torch.cat(posteriors, dim=0), dim=0)
def evaluate_attack_performance(self, positive_posteriors, negative_posteriors):
        # construct attack data
label = torch.cat((torch.ones(len(positive_posteriors[0])), torch.zeros(len(negative_posteriors[0]))))
data={}
        for i in range(3 if self.args['repartition'] else 2):  # model C posteriors exist only when repartition is enabled
data[i] = torch.cat((torch.stack(positive_posteriors[i]), torch.stack(negative_posteriors[i])),0)
# calculate l2 distance
model_b_distance = self._calculate_distance(data[0], data[1])
# directly calculate AUC with feature and labels
attack_auc_b = self.evaluate_attack_with_AUC(model_b_distance, label)
if self.args['repartition']:
model_c_distance = self._calculate_distance(data[0], data[2])
attack_auc_c = self.evaluate_attack_with_AUC(model_c_distance, label)
self.logger.info("Attack_Model_B AUC: %s | Attack_Model_C AUC: %s" % (attack_auc_b, attack_auc_c))
def evaluate_attack_with_AUC(self, data, label):
from sklearn.metrics import roc_auc_score
self.logger.info("Directly calculate the attack AUC")
return roc_auc_score(label, data.reshape(-1, 1))
    def _calculate_distance(self, data0, data1, distance='l2_norm'):
        if distance == 'l2_norm':
            return np.array([np.linalg.norm(data0[i] - data1[i]) for i in range(len(data0))])
        elif distance == 'direct_diff':
            return data0 - data1
        else:
            raise Exception("Unsupported distance")
| 13,321 | 48.895131 | 154 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp.py | import logging
from lib_dataset.data_store import DataStore
class Exp:
def __init__(self, args):
self.logger = logging.getLogger('exp')
self.args = args
self.data_store = DataStore(args)
def load_data(self):
pass
| 258 | 16.266667 | 46 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_node_edge_unlearning.py | import logging
import pickle
import time
from collections import defaultdict
import numpy as np
import torch
from torch_geometric.data import Data
import config
from exp.exp import Exp
from lib_gnn_model.graphsage.graphsage import SAGE
from lib_gnn_model.gat.gat import GAT
from lib_gnn_model.gin.gin import GIN
from lib_gnn_model.gcn.gcn import GCN
from lib_gnn_model.mlp.mlp import MLP
from lib_gnn_model.node_classifier import NodeClassifier
from lib_aggregator.aggregator import Aggregator
from lib_utils import utils
class ExpNodeEdgeUnlearning(Exp):
def __init__(self, args):
super(ExpNodeEdgeUnlearning, self).__init__(args)
self.logger = logging.getLogger('exp_node_edge_unlearning')
self.target_model_name = self.args['target_model']
self.load_data()
self.determine_target_model()
self.run_exp()
def run_exp(self):
# unlearning efficiency
run_f1 = np.empty((0))
unlearning_time = np.empty((0))
for run in range(self.args['num_runs']):
self.logger.info("Run %f" % run)
self.train_target_models(run)
aggregate_f1_score = self.aggregate(run)
# node_unlearning_time = self.unlearning_time_statistic()
node_unlearning_time = 0
run_f1 = np.append(run_f1, aggregate_f1_score)
unlearning_time = np.append(unlearning_time, node_unlearning_time)
self.num_unlearned_edges = 0
# model utility
self.f1_score_avg = np.average(run_f1)
self.f1_score_std = np.std(run_f1)
self.unlearning_time_avg = np.average(unlearning_time)
self.unlearning_time_std = np.std(unlearning_time)
self.logger.info(
"%s %s %s %s" % (self.f1_score_avg, self.f1_score_std, self.unlearning_time_avg, self.unlearning_time_std))
def load_data(self):
self.shard_data = self.data_store.load_shard_data()
self.raw_data = self.data_store.load_raw_data()
self.train_data = self.data_store.load_train_data()
self.unlearned_shard_data = self.shard_data
def determine_target_model(self):
num_feats = self.train_data.num_features
num_classes = len(self.train_data.y.unique())
if not self.args['is_use_batch']:
if self.target_model_name == 'SAGE':
self.target_model = SAGE(num_feats, num_classes)
elif self.target_model_name == 'GCN':
self.target_model = GCN(num_feats, num_classes)
elif self.target_model_name == 'GAT':
self.target_model = GAT(num_feats, num_classes)
elif self.target_model_name == 'GIN':
self.target_model = GIN(num_feats, num_classes)
else:
raise Exception('unsupported target model')
else:
if self.target_model_name == 'MLP':
self.target_model = MLP(num_feats, num_classes)
else:
self.target_model = NodeClassifier(num_feats, num_classes, self.args)
def train_target_models(self, run):
if self.args['is_train_target_model']:
self.logger.info('training target models')
self.time = {}
for shard in range(self.args['num_shards']):
self.time[shard] = self._train_model(run, shard)
def aggregate(self, run):
self.logger.info('aggregating submodels')
# posteriors, true_label = self.generate_posterior()
aggregator = Aggregator(run, self.target_model, self.train_data, self.unlearned_shard_data, self.args)
aggregator.generate_posterior()
self.aggregate_f1_score = aggregator.aggregate()
self.logger.info("Final Test F1: %s" % (self.aggregate_f1_score,))
return self.aggregate_f1_score
def _generate_unlearning_request(self, num_unlearned="assign"):
node_list = []
for key, value in self.community_to_node.items():
# node_list.extend(value.tolist())
node_list.extend(value)
if num_unlearned == "assign":
num_of_unlearned_nodes = self.args['num_unlearned_nodes']
elif num_unlearned == "ratio":
num_of_unlearned_nodes = int(self.args['ratio_unlearned_nodes'] * len(node_list))
if self.args['unlearning_request'] == 'random':
unlearned_nodes_indices = np.random.choice(node_list, num_of_unlearned_nodes, replace=False)
elif self.args['unlearning_request'] == 'top1':
sorted_shards = sorted(self.community_to_node.items(), key=lambda x: len(x[1]), reverse=True)
unlearned_nodes_indices = np.random.choice(sorted_shards[0][1], num_of_unlearned_nodes, replace=False)
elif self.args['unlearning_request'] == 'adaptive':
sorted_shards = sorted(self.community_to_node.items(), key=lambda x: len(x[1]), reverse=True)
candidate_list = np.concatenate([sorted_shards[i][1] for i in range(int(self.args['num_shards']/2)+1)], axis=0)
unlearned_nodes_indices = np.random.choice(candidate_list, num_of_unlearned_nodes, replace=False)
elif self.args['unlearning_request'] == 'last5':
sorted_shards = sorted(self.community_to_node.items(), key=lambda x: len(x[1]), reverse=False)
candidate_list = np.concatenate([sorted_shards[i][1] for i in range(int(self.args['num_shards']/2)+1)], axis=0)
unlearned_nodes_indices = np.random.choice(candidate_list, num_of_unlearned_nodes, replace=False)
return unlearned_nodes_indices
def unlearning_time_statistic(self):
if self.args['is_train_target_model'] and self.args['num_shards'] != 1:
# random sample 5% nodes, find their belonging communities
unlearned_nodes = self._generate_unlearning_request(num_unlearned="ratio")
belong_community = []
for sample_node in range(len(unlearned_nodes)):
for community, node in self.community_to_node.items():
if np.in1d(unlearned_nodes[sample_node], node).any():
belong_community.append(community)
# calculate the total unlearning time and group unlearning time
group_unlearning_time = []
node_unlearning_time = []
for shard in range(self.args['num_shards']):
if belong_community.count(shard) != 0:
group_unlearning_time.append(self.time[shard])
node_unlearning_time.extend([float(self.time[shard]) for j in range(belong_community.count(shard))])
return node_unlearning_time
elif self.args['is_train_target_model'] and self.args['num_shards'] == 1:
return self.time[0]
else:
return 0
def _train_model(self, run, shard):
self.logger.info('training target models, run %s, shard %s' % (run, shard))
start_time = time.time()
self.target_model.data = self.unlearned_shard_data[shard]
self.target_model.train_model()
train_time = time.time() - start_time
self.data_store.save_target_model(run, self.target_model, shard)
return train_time
| 7,194 | 42.606061 | 123 | py |
Graph-Unlearning | Graph-Unlearning-main/exp/exp_unlearning.py | import logging
import time
import numpy as np
from exp.exp import Exp
from lib_gnn_model.graphsage.graphsage import SAGE
from lib_gnn_model.gat.gat import GAT
from lib_gnn_model.gin.gin import GIN
from lib_gnn_model.gcn.gcn import GCN
from lib_gnn_model.mlp.mlp import MLP
from lib_gnn_model.node_classifier import NodeClassifier
from lib_aggregator.aggregator import Aggregator
class ExpUnlearning(Exp):
def __init__(self, args):
super(ExpUnlearning, self).__init__(args)
self.logger = logging.getLogger('exp_unlearning')
self.target_model_name = self.args['target_model']
self.num_opt_samples = self.args['num_opt_samples']
self.load_data()
self.determine_target_model()
run_f1 = np.empty((0))
unlearning_time = np.empty((0))
for run in range(self.args['num_runs']):
self.logger.info("Run %f" % run)
self.train_target_models(run)
aggregate_f1_score = self.aggregate(run)
node_unlearning_time = self.unlearning_time_statistic()
run_f1 = np.append(run_f1, aggregate_f1_score)
unlearning_time = np.append(unlearning_time, node_unlearning_time)
self.f1_score_avg = np.average(run_f1)
self.f1_score_std = np.std(run_f1)
self.unlearning_time_avg = np.average(unlearning_time)
self.unlearning_time_std = np.std(unlearning_time)
self.logger.info("%s %s %s %s" % (self.f1_score_avg, self.f1_score_std, self.unlearning_time_avg, self.unlearning_time_std))
def load_data(self):
self.shard_data = self.data_store.load_shard_data()
self.data = self.data_store.load_raw_data()
def determine_target_model(self):
num_feats = self.data.num_features
num_classes = len(self.data.y.unique())
if not self.args['is_use_batch']:
if self.target_model_name == 'SAGE':
self.target_model = SAGE(num_feats, num_classes)
elif self.target_model_name == 'GCN':
self.target_model = GCN(num_feats, num_classes)
elif self.target_model_name == 'GAT':
self.target_model = GAT(num_feats, num_classes)
elif self.target_model_name == 'GIN':
self.target_model = GIN(num_feats, num_classes)
else:
raise Exception('unsupported target model')
else:
if self.target_model_name == 'MLP':
self.target_model = MLP(num_feats, num_classes)
else:
self.target_model = NodeClassifier(num_feats, num_classes, self.args)
def train_target_models(self, run):
if self.args['is_train_target_model']:
self.logger.info('training target models')
self.time = {}
for shard in range(self.args['num_shards']):
self.time[shard] = self._train_model(run, shard)
def aggregate(self, run):
self.logger.info('aggregating submodels')
start_time = time.time()
aggregator = Aggregator(run, self.target_model, self.data, self.shard_data, self.args)
aggregator.generate_posterior()
self.aggregate_f1_score = aggregator.aggregate()
aggregate_time = time.time() - start_time
self.logger.info("Partition cost %s seconds." % aggregate_time)
self.logger.info("Final Test F1: %s" % (self.aggregate_f1_score,))
return self.aggregate_f1_score
def unlearning_time_statistic(self):
if self.args['is_train_target_model'] and self.args['num_shards'] != 1:
self.community_to_node = self.data_store.load_community_data()
node_list = []
for key, value in self.community_to_node.items():
node_list.extend(value)
# random sample 5% nodes, find their belonging communities
sample_nodes = np.random.choice(node_list, int(0.05 * len(node_list)))
belong_community = []
for sample_node in range(len(sample_nodes)):
for community, node in self.community_to_node.items():
if np.in1d(sample_nodes[sample_node], node).any():
belong_community.append(community)
# calculate the total unlearning time and group unlearning time
group_unlearning_time = []
node_unlearning_time = []
for shard in range(self.args['num_shards']):
if belong_community.count(shard) != 0:
group_unlearning_time.append(self.time[shard])
node_unlearning_time.extend([float(self.time[shard]) for j in range(belong_community.count(shard))])
return node_unlearning_time
elif self.args['is_train_target_model'] and self.args['num_shards'] == 1:
return self.time[0]
else:
return 0
def _train_model(self, run, shard):
self.logger.info('training target models, run %s, shard %s' % (run, shard))
start_time = time.time()
self.target_model.data = self.shard_data[shard]
self.target_model.train_model()
train_time = time.time() - start_time
self.data_store.save_target_model(run, self.target_model, shard)
self.logger.info("Model training time: %s" % (train_time))
return train_time
| 5,345 | 39.195489 | 132 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_dataset/data_store.py | import os
import pickle
import logging
import shutil
import numpy as np
import torch
from torch_geometric.datasets import Planetoid, Coauthor
import torch_geometric.transforms as T
import config
class DataStore:
def __init__(self, args):
self.logger = logging.getLogger('data_store')
self.args = args
self.dataset_name = self.args['dataset_name']
self.num_features = {
"cora": 1433,
"pubmed": 500,
"citeseer": 3703,
"Coauthor_CS": 6805,
"Coauthor_Phys": 8415
}
self.partition_method = self.args['partition_method']
self.num_shards = self.args['num_shards']
self.target_model = self.args['target_model']
self.determine_data_path()
def determine_data_path(self):
embedding_name = '_'.join(('embedding', self._extract_embedding_method(self.partition_method),
str(self.args['ratio_deleted_edges'])))
community_name = '_'.join(('community', self.partition_method, str(self.num_shards),
str(self.args['ratio_deleted_edges'])))
shard_name = '_'.join(('shard_data', self.partition_method, str(self.num_shards),
str(self.args['shard_size_delta']), str(self.args['ratio_deleted_edges'])))
target_model_name = '_'.join((self.target_model, self.partition_method, str(self.num_shards),
str(self.args['shard_size_delta']), str(self.args['ratio_deleted_edges'])))
optimal_weight_name = '_'.join((self.target_model, self.partition_method, str(self.num_shards),
str(self.args['shard_size_delta']), str(self.args['ratio_deleted_edges'])))
processed_data_prefix = config.PROCESSED_DATA_PATH + self.dataset_name + "/"
self.train_test_split_file = processed_data_prefix + "train_test_split" + str(self.args['test_ratio'])
self.train_data_file = processed_data_prefix + "train_data"
self.train_graph_file = processed_data_prefix + "train_graph"
self.embedding_file = processed_data_prefix + embedding_name
self.community_file = processed_data_prefix + community_name
self.shard_file = processed_data_prefix + shard_name
self.unlearned_file = processed_data_prefix+ '_'.join(('unlearned', str(self.args['num_unlearned_nodes'])))
self.target_model_file = config.MODEL_PATH + self.dataset_name + '/' + target_model_name
self.optimal_weight_file = config.ANALYSIS_PATH + 'optimal/' + self.dataset_name + '/' + optimal_weight_name
self.posteriors_file = config.ANALYSIS_PATH + 'posteriors/' + self.dataset_name + '/' + target_model_name
dir_lists = [s + self.dataset_name for s in [config.PROCESSED_DATA_PATH,
config.MODEL_PATH,
config.ANALYSIS_PATH + 'optimal/',
config.ANALYSIS_PATH + 'posteriors/']]
for dir in dir_lists:
self._check_and_create_dirs(dir)
def _check_and_create_dirs(self, folder):
if not os.path.exists(folder):
try:
self.logger.info("checking directory %s", folder)
os.makedirs(folder, exist_ok=True)
self.logger.info("new directory %s created", folder)
except OSError as error:
self.logger.info("deleting old and creating new empty %s", folder)
shutil.rmtree(folder)
os.mkdir(folder)
self.logger.info("new empty directory %s created", folder)
else:
self.logger.info("folder %s exists, do not need to create again.", folder)
def load_raw_data(self):
self.logger.info('loading raw data')
if not self.args['is_use_node_feature']:
self.transform = T.Compose([
T.OneHotDegree(-2, cat=False) # use only node degree as node feature.
])
else:
self.transform = None
if self.dataset_name in ["cora", "pubmed", "citeseer"]:
dataset = Planetoid(config.RAW_DATA_PATH, self.dataset_name, transform=T.NormalizeFeatures())
labels = np.unique(dataset.data.y.numpy())
elif self.dataset_name in ["Coauthor_CS", "Coauthor_Phys"]:
if self.dataset_name == "Coauthor_Phys":
dataset = Coauthor(config.RAW_DATA_PATH, name="Physics", pre_transform=self.transform)
else:
dataset = Coauthor(config.RAW_DATA_PATH, name="CS", pre_transform=self.transform)
else:
raise Exception('unsupported dataset')
data = dataset[0]
return data
def save_train_data(self, train_data):
self.logger.info('saving train data')
pickle.dump(train_data, open(self.train_data_file, 'wb'))
def load_train_data(self):
self.logger.info('loading train data')
return pickle.load(open(self.train_data_file, 'rb'))
def save_train_graph(self, train_data):
self.logger.info('saving train graph')
pickle.dump(train_data, open(self.train_graph_file, 'wb'))
def load_train_graph(self):
self.logger.info('loading train graph')
return pickle.load(open(self.train_graph_file, 'rb'))
def save_train_test_split(self, train_indices, test_indices):
self.logger.info('saving train test split data')
pickle.dump((train_indices, test_indices), open(self.train_test_split_file, 'wb'))
def load_train_test_split(self):
self.logger.info('loading train test split data')
return pickle.load(open(self.train_test_split_file, 'rb'))
def save_embeddings(self, embeddings):
self.logger.info('saving embedding data')
pickle.dump(embeddings, open(self.embedding_file, 'wb'))
def load_embeddings(self):
self.logger.info('loading embedding data')
return pickle.load(open(self.embedding_file, 'rb'))
def save_community_data(self, community_to_node, suffix=''):
self.logger.info('saving community data')
pickle.dump(community_to_node, open(self.community_file + suffix, 'wb'))
def load_community_data(self, suffix=''):
self.logger.info('loading community data from: %s'%(self.community_file + suffix))
return pickle.load(open(self.community_file + suffix, 'rb'))
def c2n_to_n2c(self, community_to_node):
node_list = []
for i in range(self.num_shards):
node_list.extend(list(community_to_node.values())[i])
node_to_community = {}
for comm, nodes in dict(community_to_node).items():
for node in nodes:
# Map node id back to original graph
# node_to_community[node_list[node]] = comm
node_to_community[node] = comm
return node_to_community
def save_shard_data(self, shard_data):
self.logger.info('saving shard data')
pickle.dump(shard_data, open(self.shard_file, 'wb'))
def load_shard_data(self):
self.logger.info('loading shard data')
return pickle.load(open(self.shard_file, 'rb'))
def load_unlearned_data(self, suffix):
file_path = '_'.join((self.unlearned_file, suffix))
self.logger.info('loading unlearned data from %s' % file_path)
return pickle.load(open(file_path, 'rb'))
def save_unlearned_data(self, data, suffix):
self.logger.info('saving unlearned data %s' % suffix)
pickle.dump(data, open('_'.join((self.unlearned_file, suffix)), 'wb'))
def save_target_model(self, run, model, shard, suffix=''):
if self.args["exp"] in ["node_edge_unlearning", "attack_unlearning"]:
model_path = '_'.join((self.target_model_file, str(shard), str(run), str(self.args['num_unlearned_nodes']))) + suffix
model.save_model(model_path)
else:
model.save_model(self.target_model_file + '_' + str(shard) + '_' + str(run))
# model.save_model(self.target_model_file + '_' + str(shard))
def load_target_model(self, run, model, shard, suffix=''):
if self.args["exp"] == "node_edge_unlearning":
model.load_model(
'_'.join((self.target_model_file, str(shard), str(run), str(self.args['num_unlearned_nodes']))))
elif self.args["exp"] == "attack_unlearning":
model_path = '_'.join((self.target_model_file, str(shard), str(run), str(self.args['num_unlearned_nodes']))) + suffix
print("loading target model from:" + model_path)
device = torch.device('cpu')
model.load_model(model_path)
model.device=device
else:
# model.load_model(self.target_model_file + '_' + str(shard) + '_' + str(run))
model.load_model(self.target_model_file + '_' + str(shard) + '_' + str(0))
def save_optimal_weight(self, weight, run):
torch.save(weight, self.optimal_weight_file + '_' + str(run))
def load_optimal_weight(self, run):
return torch.load(self.optimal_weight_file + '_' + str(run))
def save_posteriors(self, posteriors, run, suffix=''):
torch.save(posteriors, self.posteriors_file + '_' + str(run) + suffix)
def load_posteriors(self, run):
return torch.load(self.posteriors_file + '_' + str(run))
def _extract_embedding_method(self, partition_method):
return partition_method.split('_')[0]
| 9,583 | 44.421801 | 129 | py |
Graph-Unlearning | Graph-Unlearning-main/lib_dataset/__init__.py | 0 | 0 | 0 | py |
|
ZINBAE | ZINBAE-master/ZINBAE.py | """
Implementation of ZINBAE model
"""
from time import time
import numpy as np
from keras.models import Model
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Input, GaussianNoise, Layer, Activation, Lambda, Multiply, BatchNormalization, Reshape, Concatenate, RepeatVector, Permute
from keras.models import Model
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils.vis_utils import plot_model
from keras.callbacks import EarlyStopping
from sklearn.cluster import KMeans
from sklearn import metrics
import h5py
import scanpy.api as sc
from layers import ConstantDispersionLayer, SliceLayer, ColWiseMultLayer
from loss import NB, ZINB, mse_loss_v2  # poisson_loss is not defined in the bundled loss.py and was unused here
from preprocess import read_dataset, normalize
import tensorflow as tf
from numpy.random import seed
seed(2211)
from tensorflow import set_random_seed
set_random_seed(2211)
MeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
def mean_MSE(x_impute, x_real):
return np.mean(np.square(np.log(x_impute+1)-np.log(x_real+1)))
def imputate_error(x_impute, x_real, x_raw):
x_impute_log = np.log(x_impute[(x_raw-x_real)<0]+1)
x_real_log = np.log(x_real[(x_raw-x_real)<0]+1)
return np.sum(np.abs(x_impute_log-x_real_log))/np.sum(x_real_log>0)
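# --- Illustrative note (added for clarity; not part of the original file) ---
# imputate_error above scores imputation only at dropout positions, i.e. entries
# where the observed raw count fell below the true count, and compares log-counts
# there. Hand-worked toy example: only the first entry is a dropout, so the error
# is |log(3+1) - log(4+1)| / 1 ~= 0.223.
def _demo_imputate_error():
    true_count = np.array([[4.0, 2.0]])
    raw_count = np.array([[0.0, 2.0]])   # first gene dropped out in the raw data
    imputed = np.array([[3.0, 2.0]])     # model's guess for the dropped entry
    return imputate_error(imputed, true_count, x_raw=raw_count)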
def autoencoder(dims, noise_sd=0, init='glorot_uniform', act='relu'):
"""
Fully connected auto-encoder model, symmetric.
Arguments:
dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer.
The decoder is symmetric with encoder. So number of layers of the auto-encoder is 2*len(dims)-1
act: activation, not applied to Input, Hidden and Output layers
return:
Model of autoencoder
"""
n_stacks = len(dims) - 1
# input
sf_layer = Input(shape=(1,), name='size_factors')
x = Input(shape=(dims[0],), name='counts')
h = x
h = GaussianNoise(noise_sd, name='input_noise')(h)
# internal layers in encoder
for i in range(n_stacks-1):
h = Dense(dims[i + 1], kernel_initializer=init, name='encoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='encoder_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_act_%d' % i)(h)
# hidden layer
h = Dense(dims[-1], kernel_initializer=init, name='encoder_hidden')(h) # hidden layer, features are extracted from here
h = BatchNormalization(center=True, scale=False, name='encoder_hidden_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_hidden_act')(h)
# internal layers in decoder
for i in range(n_stacks-1, 0, -1):
h = Dense(dims[i], kernel_initializer=init, name='decoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='decoder_batchnorm_%d' % i)(h)
h = Activation(act, name='decoder_act_%d' % i)(h)
# output
pi = Dense(dims[0], activation='sigmoid', kernel_initializer=init, name='pi')(h)
disp = Dense(dims[0], activation=DispAct, kernel_initializer=init, name='dispersion')(h)
mean = Dense(dims[0], activation=MeanAct, kernel_initializer=init, name='mean')(h)
output = ColWiseMultLayer(name='output')([mean, sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])
return Model(inputs=[x, sf_layer], outputs=output)
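# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Building the ZINB autoencoder defined above with toy dimensions (200 genes is an
# arbitrary example value). The model takes the normalised counts plus per-cell
# size factors and returns the size-factor-scaled ZINB mean; the 'pi' and
# 'dispersion' heads stay reachable through get_layer for the loss.
def _demo_autoencoder_shapes():
    ae = autoencoder(dims=[200, 64, 32], noise_sd=1.0)
    print(ae.input_shape)                      # [(None, 200), (None, 1)]
    print(ae.output_shape)                     # (None, 200)
    print(ae.get_layer('pi').output_shape)     # (None, 200) dropout probabilities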
### Gumbel-softmax layer ###
def sampling_gumbel(shape, eps=1e-8):
u = tf.random_uniform(shape, minval=0., maxval=1)
return -tf.log(-tf.log(u+eps)+eps)
def compute_softmax(logits,temp):
z = logits + sampling_gumbel( K.shape(logits) )
return K.softmax( z / temp )
def gumbel_softmax(args):
logits,temp = args
y = compute_softmax(logits,temp)
return y
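# --- Illustrative sketch (added for clarity; not part of the original file) ---
# The three helpers above implement the Gumbel-softmax relaxation: for logits l
# and temperature t, y = softmax((l + g) / t) with g = -log(-log(u)), u ~ U(0, 1).
# Small t pushes y towards a one-hot sample of softmax(l); larger t keeps it smooth
# and differentiable. A NumPy-only restatement of the same sampling rule:
def _gumbel_softmax_numpy(logits, temp, eps=1e-8):
    u = np.random.uniform(size=np.shape(logits))
    g = -np.log(-np.log(u + eps) + eps)
    z = (np.asarray(logits) + g) / temp
    z = z - z.max(axis=-1, keepdims=True)      # subtract max for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)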
class ZINB_AE(object):
def __init__(self,
dims,
noise_sd=0,
ridge=0,
debug=False,
eps = 1e-20):
self.dims = dims
self.input_dim = dims[0]
self.n_stacks = len(self.dims) - 1
self.noise_sd = noise_sd
self.act = 'relu'
self.ridge = ridge
self.debug = debug
        self.eps = eps
        self.pretrained = False  # set to True once pretrain() has run or pretrained weights are loaded
self.autoencoder = autoencoder(self.dims, noise_sd=self.noise_sd, act = self.act)
pi = self.autoencoder.get_layer(name='pi').output
disp = self.autoencoder.get_layer(name='dispersion').output
zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
self.zinb_loss = zinb.loss
# zero-inflated outputs
tau_input = Input(shape=(self.dims[0],), name='tau_input')
pi_ = self.autoencoder.get_layer('pi').output
mean_ = self.autoencoder.output
pi_log_ = Lambda(lambda x:tf.log(x+self.eps))(pi_)
nondrop_pi_log_ = Lambda(lambda x:tf.log(1-x+self.eps))(pi_)
pi_log_ = Reshape( target_shape=(self.dims[0],1) )(pi_log_)
nondrop_pi_log_ = Reshape( target_shape=(self.dims[0],1) )(nondrop_pi_log_)
logits = Concatenate(axis=-1)([pi_log_,nondrop_pi_log_])
temp_ = RepeatVector( 2 )(tau_input)
temp_ = Permute( (2,1) )(temp_)
samples_ = Lambda( gumbel_softmax,output_shape=(self.dims[0],2,) )( [logits,temp_] )
samples_ = Lambda( lambda x:x[:,:,1] )(samples_)
samples_ = Reshape( target_shape=(self.dims[0],) )(samples_)
output_ = Multiply(name='ZI_output')([mean_, samples_])
self.model = Model(inputs=[self.autoencoder.input[0], self.autoencoder.input[1], tau_input],
outputs=[output_, self.autoencoder.output])
def pretrain(self, x, x_count, batch_size=256, epochs=200, optimizer='adam', ae_file='ae_weights.h5'):
print('...Pretraining autoencoder...')
self.autoencoder.compile(loss=self.zinb_loss, optimizer=optimizer)
es = EarlyStopping(monitor="loss", patience=50, verbose=1)
self.autoencoder.fit(x=x, y=x_count, batch_size=batch_size, epochs=epochs, callbacks=[es], shuffle=True)
self.autoencoder.save_weights(ae_file)
print('Pretrained weights are saved to ./' + str(ae_file))
self.pretrained = True
def fit(self, x, x_count, batch_size=256, maxiter=2e3, ae_weights=None,
loss_weights=[0.01, 1], optimizer='adam', model_file='model_weight.h5'):
self.model.compile(loss={'ZI_output': mse_loss_v2, 'slice': self.zinb_loss}, loss_weights=loss_weights, optimizer=optimizer)
if not self.pretrained and ae_weights is None:
print('...pretraining autoencoders using default hyper-parameters:')
print(' optimizer=\'adam\'; epochs=200')
self.pretrain(x, x_count, batch_size)
self.pretrained = True
elif ae_weights is not None:
self.autoencoder.load_weights(ae_weights)
print('ae_weights is loaded successfully.')
# anneal tau
tau0 = 1.
min_tau = 0.5
anneal_rate = 0.0003
tau = tau0
# es = EarlyStopping(monitor="loss", patience=20, verbose=1)
        for e in range(int(maxiter)):
if e % 100 == 0:
tau = max( tau0*np.exp( -anneal_rate * e),min_tau )
tau_in = np.ones( x[0].shape,dtype='float32' ) * tau
print(tau)
print("Epoch %d/%d" % (e, maxiter))
self.model.fit(x=[x[0], x[1], tau_in], y=x_count, batch_size=batch_size, epochs=1, shuffle=True)
self.model.save_weights(model_file)
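# --- Illustrative note (added for clarity; not part of the original file) ---
# The temperature schedule used in fit() above is tau_e = max(tau0 * exp(-r * e), tau_min)
# with tau0 = 1.0, r = 0.0003 and tau_min = 0.5, refreshed every 100 epochs; for
# example it is 1.0 at e = 0, ~0.741 at e = 1000, and clamps at 0.5 beyond e ~ 2310.
def _demo_tau_schedule(e, tau0=1., anneal_rate=0.0003, min_tau=0.5):
    return max(tau0 * np.exp(-anneal_rate * e), min_tau)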
if __name__ == "__main__":
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--data_file', default='data.h5')
parser.add_argument('--pretrain_epochs', default=300, type=int)
parser.add_argument('--max_iters', default=2000, type=int)
parser.add_argument('--gamma', default=.01, type=float)
parser.add_argument('--ae_weights', default=None)
parser.add_argument('--ae_weight_file', default='ae_weights.h5')
parser.add_argument('--model_weight_file', default='model_weights.h5')
args = parser.parse_args()
# load dataset
optimizer = Adam(amsgrad=True)
data_mat = h5py.File(args.data_file)
x = np.array(data_mat['X'])
y = np.array(data_mat['Y'])
true_count = np.array(data_mat['true_count'])
data_mat.close()
x = np.floor(x)
# preprocessing scRNA-seq read counts matrix
adata = sc.AnnData(x)
adata.obs['Group'] = y
adata = read_dataset(adata,
transpose=False,
test_split=False,
copy=True)
adata = normalize(adata,
size_factors=True,
normalize_input=True,
logtrans_input=True)
input_size = adata.n_vars
print(adata.X.shape)
print(y.shape)
x_sd = adata.X.std(0)
x_sd_median = np.median(x_sd)
print("median of gene sd: %.5f" % x_sd_median)
print(args)
zinbae_model = ZINB_AE(dims=[input_size, 64, 32], noise_sd=2.5)
zinbae_model.autoencoder.summary()
zinbae_model.model.summary()
# Pretrain autoencoders before clustering
if args.ae_weights is None:
zinbae_model.pretrain(x=[adata.X, adata.obs.size_factors], x_count=adata.raw.X, batch_size=args.batch_size, epochs=args.pretrain_epochs,
optimizer=optimizer, ae_file=args.ae_weight_file)
zinbae_model.fit(x=[adata.X, adata.obs.size_factors], x_count=[adata.raw.X, adata.raw.X], batch_size=args.batch_size, ae_weights=args.ae_weights,
maxiter=args.max_iters, loss_weights=[args.gamma, 1], optimizer=optimizer, model_file=args.model_weight_file)
# Impute error
x_impute = zinbae_model.autoencoder.predict(x=[adata.X, adata.obs.size_factors])
raw_error = imputate_error(adata.raw.X, true_count, x_raw=adata.raw.X)
imputation_error = imputate_error(x_impute, true_count, x_raw=adata.raw.X)
print("Before imputation error: %.4f, after imputation error: %.4f" % (raw_error, imputation_error))
| 10,280 | 39.636364 | 154 | py |
ZINBAE | ZINBAE-master/loss.py | import numpy as np
import tensorflow as tf
from keras import backend as K
def _nan2zero(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x), x)
def _nan2inf(x):
return tf.where(tf.is_nan(x), tf.zeros_like(x)+np.inf, x)
def _nelem(x):
nelem = tf.reduce_sum(tf.cast(~tf.is_nan(x), tf.float32))
return tf.cast(tf.where(tf.equal(nelem, 0.), 1., nelem), x.dtype)
def _reduce_mean(x):
nelem = _nelem(x)
x = _nan2zero(x)
return tf.divide(tf.reduce_sum(x), nelem)
def mse_loss(y_true, y_pred):
ret = tf.square(y_pred - y_true)
return _reduce_mean(ret)
def mse_loss_v2(y_true, y_pred):
y_true = tf.log(y_true+1)
y_pred = tf.log(y_pred+1)
ret = tf.square(y_pred - y_true)
return _reduce_mean(ret)
class NB(object):
def __init__(self, theta=None, masking=False, scope='nbinom_loss/',
scale_factor=1.0, debug=False):
# for numerical stability
self.eps = 1e-10
self.scale_factor = scale_factor
self.debug = debug
self.scope = scope
self.masking = masking
self.theta = theta
def loss(self, y_true, y_pred, mean=True):
scale_factor = self.scale_factor
eps = self.eps
with tf.name_scope(self.scope):
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32) * scale_factor
if self.masking:
nelem = _nelem(y_true)
y_true = _nan2zero(y_true)
# Clip theta
theta = tf.minimum(self.theta, 1e6)
t1 = tf.lgamma(theta+eps) + tf.lgamma(y_true+1.0) - tf.lgamma(y_true+theta+eps)
t2 = (theta+y_true) * tf.log(1.0 + (y_pred/(theta+eps))) + (y_true * (tf.log(theta+eps) - tf.log(y_pred+eps)))
if self.debug:
assert_ops = [
tf.verify_tensor_all_finite(y_pred, 'y_pred has inf/nans'),
tf.verify_tensor_all_finite(t1, 't1 has inf/nans'),
tf.verify_tensor_all_finite(t2, 't2 has inf/nans')]
tf.summary.histogram('t1', t1)
tf.summary.histogram('t2', t2)
with tf.control_dependencies(assert_ops):
final = t1 + t2
else:
final = t1 + t2
final = _nan2inf(final)
if mean:
if self.masking:
final = tf.divide(tf.reduce_sum(final), nelem)
else:
final = tf.reduce_mean(final)
return final
class ZINB(NB):
def __init__(self, pi, ridge_lambda=0.0, scope='zinb_loss/', **kwargs):
super().__init__(scope=scope, **kwargs)
self.pi = pi
self.ridge_lambda = ridge_lambda
def loss(self, y_true, y_pred, mean=True):
scale_factor = self.scale_factor
eps = self.eps
with tf.name_scope(self.scope):
# reuse existing NB neg.log.lik.
# mean is always False here, because everything is calculated
# element-wise. we take the mean only in the end
nb_case = super().loss(y_true, y_pred, mean=False) - tf.log(1.0-self.pi+eps)
y_true = tf.cast(y_true, tf.float32)
y_pred = tf.cast(y_pred, tf.float32) * scale_factor
theta = tf.minimum(self.theta, 1e6)
zero_nb = tf.pow(theta/(theta+y_pred+eps), theta)
zero_case = -tf.log(self.pi + ((1.0-self.pi)*zero_nb)+eps)
result = tf.where(tf.less(y_true, 1e-8), zero_case, nb_case)
ridge = self.ridge_lambda*tf.square(self.pi)
result += ridge
if mean:
if self.masking:
result = _reduce_mean(result)
else:
result = tf.reduce_mean(result)
result = _nan2inf(result)
if self.debug:
tf.summary.histogram('nb_case', nb_case)
tf.summary.histogram('zero_nb', zero_nb)
tf.summary.histogram('zero_case', zero_case)
tf.summary.histogram('ridge', ridge)
            return result
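# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# Per entry, the ZINB negative log-likelihood above is
#   y == 0:  -log( pi + (1 - pi) * (theta / (theta + mu))**theta )
#   y  > 0:  -log(1 - pi) + NB negative log-likelihood of y given (mu, theta)
# optionally plus a ridge term lambda * pi**2. Toy constants, evaluated with the
# TF1-style session API that the rest of this code base assumes:
def _demo_zinb_loss():
    pi = tf.constant([[0.1, 0.2]])
    theta = tf.constant([[1.0, 2.0]])
    y_true = tf.constant([[0.0, 3.0]])
    y_pred = tf.constant([[0.5, 2.5]])
    loss = ZINB(pi, theta=theta).loss(y_true, y_pred)
    with tf.Session() as sess:
        print(sess.run(loss))   # mean ZINB negative log-likelihood over the two entries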
| 4,141 | 30.142857 | 122 | py |
ZINBAE | ZINBAE-master/layers.py | from keras.engine.topology import Layer
from keras.layers import Lambda
from keras import backend as K
import tensorflow as tf
class ConstantDispersionLayer(Layer):
'''
An identity layer which allows us to inject extra parameters
such as dispersion to Keras models
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
def build(self, input_shape):
self.theta = self.add_weight(shape=(1, input_shape[1]),
initializer='zeros',
trainable=True,
name='theta')
self.theta_exp = tf.clip_by_value(K.exp(self.theta), 1e-3, 1e4)
super().build(input_shape)
def call(self, x):
return tf.identity(x)
def compute_output_shape(self, input_shape):
return input_shape
class SliceLayer(Layer):
def __init__(self, index, **kwargs):
self.index = index
super().__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise ValueError('Input should be a list')
super().build(input_shape)
def call(self, x):
assert isinstance(x, list), 'SliceLayer input is not a list'
return x[self.index]
def compute_output_shape(self, input_shape):
return input_shape[self.index]
nan2zeroLayer = Lambda(lambda x: tf.where(tf.is_nan(x), tf.zeros_like(x), x))
ColWiseMultLayer = lambda name: Lambda(lambda l: l[0]*(tf.matmul(tf.reshape(l[1], (-1,1)),
tf.ones((1, l[0].get_shape()[1]),
dtype=l[1].dtype))),
name=name)
| 1,798 | 32.314815 | 98 | py |
ZINBAE | ZINBAE-master/ZINBAE0.py | """
Implementation of the ZINBAE0 model for scRNA-seq data
"""
from time import time
import numpy as np
from keras.models import Model
import keras.backend as K
from keras.engine.topology import Layer, InputSpec
from keras.layers import Dense, Input, GaussianNoise, Layer, Activation, Lambda, Multiply, BatchNormalization, Reshape, Concatenate, RepeatVector, Permute
from keras.models import Model
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils.vis_utils import plot_model
from keras.callbacks import EarlyStopping
from sklearn.cluster import KMeans
from sklearn import metrics
import h5py
import scanpy.api as sc
from layers import ConstantDispersionLayer, SliceLayer, ColWiseMultLayer
from loss import NB, ZINB, mse_loss_v2  # poisson_loss is not defined in the bundled loss.py and was unused here
from preprocess import read_dataset, normalize
import tensorflow as tf
from numpy.random import seed
seed(2211)
from tensorflow import set_random_seed
set_random_seed(2211)
MeanAct = lambda x: tf.clip_by_value(K.exp(x), 1e-5, 1e6)
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)
def mean_MSE(x_impute, x_real):
return np.mean(np.square(np.log(x_impute+1)-np.log(x_real+1)))
def imputate_error(x_impute, x_real, x_raw):
x_impute_log = np.log(x_impute[(x_raw-x_real)<0]+1)
x_real_log = np.log(x_real[(x_raw-x_real)<0]+1)
return np.sum(np.abs(x_impute_log-x_real_log))/np.sum(x_real_log>0)
def autoencoder(dims, noise_sd=0, init='glorot_uniform', act='relu'):
"""
Fully connected auto-encoder model, symmetric.
Arguments:
dims: list of number of units in each layer of encoder. dims[0] is input dim, dims[-1] is units in hidden layer.
The decoder is symmetric with encoder. So number of layers of the auto-encoder is 2*len(dims)-1
act: activation, not applied to Input, Hidden and Output layers
return:
Model of autoencoder
"""
n_stacks = len(dims) - 1
# input
sf_layer = Input(shape=(1,), name='size_factors')
x = Input(shape=(dims[0],), name='counts')
h = x
h = GaussianNoise(noise_sd, name='input_noise')(h)
# internal layers in encoder
for i in range(n_stacks-1):
h = Dense(dims[i + 1], kernel_initializer=init, name='encoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='encoder_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_act_%d' % i)(h)
# hidden layer
h = Dense(dims[-1], kernel_initializer=init, name='encoder_hidden')(h) # hidden layer, features are extracted from here
h = BatchNormalization(center=True, scale=False, name='encoder_hidden_batchnorm_%d' % i)(h)
h = Activation(act, name='encoder_hidden_act')(h)
# internal layers in decoder
for i in range(n_stacks-1, 0, -1):
h = Dense(dims[i], kernel_initializer=init, name='decoder_%d' % i)(h)
h = BatchNormalization(center=True, scale=False, name='decoder_batchnorm_%d' % i)(h)
h = Activation(act, name='decoder_act_%d' % i)(h)
# output
pi = Dense(dims[0], activation='sigmoid', kernel_initializer=init, name='pi')(h)
disp = Dense(dims[0], activation=DispAct, kernel_initializer=init, name='dispersion')(h)
mean = Dense(dims[0], activation=MeanAct, kernel_initializer=init, name='mean')(h)
output = ColWiseMultLayer(name='output')([mean, sf_layer])
output = SliceLayer(0, name='slice')([output, disp, pi])
return Model(inputs=[x, sf_layer], outputs=output)
class ZINB_AE0(object):
def __init__(self,
dims,
noise_sd=0,
ridge=0,
debug=False,
eps = 1e-20):
self.dims = dims
self.input_dim = dims[0]
self.n_stacks = len(self.dims) - 1
self.noise_sd = noise_sd
self.act = 'relu'
self.ridge = ridge
self.debug = debug
        self.eps = eps
        self.pretrained = False  # set to True once pretrain() has run or pretrained weights are loaded
self.autoencoder = autoencoder(self.dims, noise_sd=self.noise_sd, act = self.act)
self.pi = pi = self.autoencoder.get_layer(name='pi').output
self.disp = disp = self.autoencoder.get_layer(name='dispersion').output
zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
self.zinb_loss = zinb.loss
self.model = Model(inputs=[self.autoencoder.input[0], self.autoencoder.input[1]],
outputs=self.autoencoder.output)
def pretrain(self, x, x_count, batch_size=256, epochs=200, optimizer='adam', ae_file='ae_weights.h5'):
print('...Pretraining autoencoder...')
self.autoencoder.compile(loss=self.zinb_loss, optimizer=optimizer)
es = EarlyStopping(monitor="loss", patience=50, verbose=1)
self.autoencoder.fit(x=x, y=x_count, batch_size=batch_size, epochs=epochs, callbacks=[es], shuffle=True)
self.autoencoder.save_weights(ae_file)
print('Pretrained weights are saved to ./' + str(ae_file))
self.pretrained = True
def fit(self, x, x_count, batch_size=256, maxiter=2e3, ae_weights=None,
loss_weights=0.1, optimizer='adam', model_file='model_weight.h5'):
class custom_loss(object):
def __init__(self, pi=None, zinb_loss=None):
self.pi = pi
self.zinb_loss = zinb_loss
def custom_loss(self, y_true, y_pred):
loss1 = mse_loss_v2(y_true, (1-self.pi)*y_pred)
loss2 = self.zinb_loss(y_true, y_pred)
return loss1*loss_weights + loss2
loss = custom_loss(self.pi, self.zinb_loss)
self.model.compile(loss=loss.custom_loss, optimizer=optimizer)
if not self.pretrained and ae_weights is None:
print('...pretraining autoencoders using default hyper-parameters:')
print(' optimizer=\'adam\'; epochs=200')
self.pretrain(x, x_count, batch_size)
self.pretrained = True
elif ae_weights is not None:
self.autoencoder.load_weights(ae_weights)
print('ae_weights is loaded successfully.')
        self.model.fit(x=[x[0], x[1]], y=x_count, batch_size=batch_size, epochs=int(maxiter), shuffle=True)
self.model.save_weights(model_file)
if __name__ == "__main__":
# setting the hyper parameters
import argparse
parser = argparse.ArgumentParser(description='train',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--batch_size', default=256, type=int)
parser.add_argument('--data_file', default='data.h5')
parser.add_argument('--pretrain_epochs', default=300, type=int)
parser.add_argument('--max_iters', default=500, type=int)
parser.add_argument('--gamma', default=.01, type=float)
parser.add_argument('--ae_weights', default=None)
parser.add_argument('--ae_weight_file', default='ae_weights.h5')
parser.add_argument('--model_weight_file', default='model_weights.h5')
args = parser.parse_args()
# load dataset
optimizer = Adam(amsgrad=True)
data_mat = h5py.File(args.data_file)
x = np.array(data_mat['X'])
y = np.array(data_mat['Y'])
true_count = np.array(data_mat['true_count'])
data_mat.close()
x = np.floor(x)
# preprocessing scRNA-seq read counts matrix
adata = sc.AnnData(x)
adata.obs['Group'] = y
adata = read_dataset(adata,
transpose=False,
test_split=False,
copy=True)
adata = normalize(adata,
size_factors=True,
normalize_input=True,
logtrans_input=True)
input_size = adata.n_vars
print(adata.X.shape)
print(y.shape)
x_sd = adata.X.std(0)
x_sd_median = np.median(x_sd)
print("median of gene sd: %.5f" % x_sd_median)
print(args)
    zinbae0_model = ZINB_AE0(dims=[input_size, 64, 32], noise_sd=2.5)
zinbae0_model.autoencoder.summary()
zinbae0_model.model.summary()
# Pretrain autoencoders before clustering
if args.ae_weights is None:
zinbae0_model.pretrain(x=[adata.X, adata.obs.size_factors], x_count=adata.raw.X, batch_size=args.batch_size, epochs=args.pretrain_epochs,
optimizer=optimizer, ae_file=args.ae_weight_file)
zinbae0_model.fit(x=[adata.X, adata.obs.size_factors], x_count=adata.raw.X, batch_size=args.batch_size, ae_weights=args.ae_weights,
maxiter=args.max_iters, loss_weights=args.gamma, optimizer=optimizer, model_file=args.model_weight_file)
# Impute error
x_impute = zinbae0_model.autoencoder.predict(x=[adata.X, adata.obs.size_factors])
raw_error = imputate_error(adata.raw.X, true_count, x_raw=adata.raw.X)
imputation_error = imputate_error(x_impute, true_count, x_raw=adata.raw.X)
print("Before imputation error: %.4f, after imputation error: %.4f" % (raw_error, imputation_error))
| 8,888 | 39.040541 | 154 | py |
ZINBAE | ZINBAE-master/preprocess.py | # Copyright 2017 Goekcen Eraslan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle, os, numbers
import numpy as np
import scipy as sp
import pandas as pd
import scanpy.api as sc
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import scale
#TODO: Fix this
class AnnSequence:
def __init__(self, matrix, batch_size, sf=None):
self.matrix = matrix
if sf is None:
self.size_factors = np.ones((self.matrix.shape[0], 1),
dtype=np.float32)
else:
self.size_factors = sf
self.batch_size = batch_size
def __len__(self):
return len(self.matrix) // self.batch_size
def __getitem__(self, idx):
batch = self.matrix[idx*self.batch_size:(idx+1)*self.batch_size]
batch_sf = self.size_factors[idx*self.batch_size:(idx+1)*self.batch_size]
# return an (X, Y) pair
return {'count': batch, 'size_factors': batch_sf}, batch
def read_dataset(adata, transpose=False, test_split=False, copy=False):
if isinstance(adata, sc.AnnData):
if copy:
adata = adata.copy()
elif isinstance(adata, str):
adata = sc.read(adata)
else:
raise NotImplementedError
norm_error = 'Make sure that the dataset (adata.X) contains unnormalized count data.'
assert 'n_count' not in adata.obs, norm_error
if adata.X.size < 50e6: # check if adata.X is integer only if array is small
if sp.sparse.issparse(adata.X):
assert (adata.X.astype(int) != adata.X).nnz == 0, norm_error
else:
assert np.all(adata.X.astype(int) == adata.X), norm_error
if transpose: adata = adata.transpose()
if test_split:
train_idx, test_idx = train_test_split(np.arange(adata.n_obs), test_size=0.1, random_state=42)
spl = pd.Series(['train'] * adata.n_obs)
spl.iloc[test_idx] = 'test'
adata.obs['DCA_split'] = spl.values
else:
adata.obs['DCA_split'] = 'train'
adata.obs['DCA_split'] = adata.obs['DCA_split'].astype('category')
print('### Autoencoder: Successfully preprocessed {} genes and {} cells.'.format(adata.n_vars, adata.n_obs))
return adata
def normalize(adata, filter_min_counts=True, size_factors=True, normalize_input=True, logtrans_input=True):
if filter_min_counts:
sc.pp.filter_genes(adata, min_counts=1)
sc.pp.filter_cells(adata, min_counts=1)
if size_factors or normalize_input or logtrans_input:
adata.raw = adata.copy()
else:
adata.raw = adata
if size_factors:
sc.pp.normalize_per_cell(adata)
adata.obs['size_factors'] = adata.obs.n_counts / np.median(adata.obs.n_counts)
else:
adata.obs['size_factors'] = 1.0
if logtrans_input:
sc.pp.log1p(adata)
if normalize_input:
sc.pp.scale(adata)
return adata
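# A minimal usage sketch (illustrative only; the file name is a placeholder for
# any AnnData file with raw integer counts): read_dataset and normalize are
# typically chained like this before training.
def _example_preprocess(path='counts.h5ad'):
    adata = read_dataset(path, transpose=False, test_split=False, copy=True)
    # normalize() leaves scaled log counts in adata.X, the raw counts in
    # adata.raw.X and per-cell size factors in adata.obs['size_factors']
    adata = normalize(adata, size_factors=True, normalize_input=True, logtrans_input=True)
    return adata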
def read_genelist(filename):
genelist = list(set(open(filename, 'rt').read().strip().split('\n')))
assert len(genelist) > 0, 'No genes detected in genelist file'
print('### Autoencoder: Subset of {} genes will be denoised.'.format(len(genelist)))
return genelist
def write_text_matrix(matrix, filename, rownames=None, colnames=None, transpose=False):
if transpose:
matrix = matrix.T
rownames, colnames = colnames, rownames
pd.DataFrame(matrix, index=rownames, columns=colnames).to_csv(filename,
sep='\t',
index=(rownames is not None),
header=(colnames is not None),
float_format='%.6f')
def read_pickle(inputfile):
return pickle.load(open(inputfile, "rb")) | 4,580 | 33.969466 | 112 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Pure Python/testing_parallel_streams.py | from scipy.stats import ks_2samp
from IKS import IKS
import numpy as np
from time import time
from collections import deque
initial_A = np.random.normal(loc = 0, scale = 1, size = 500)
initial_B = np.random.normal(loc = 1, scale = 1, size = 500)
stream_A = np.random.normal(loc = 0, scale = 1, size = 5000)
stream_B = np.random.normal(loc = 1, scale = 1, size = 5000)
######################
## TEST IKS
######################
start = time()
iks_statistics = [] # collect statistics generated by IKS
iks = IKS()
sliding_A = deque(initial_A) # sliding window
sliding_B = deque(initial_B) # sliding window
for a, b in zip(initial_A, initial_B):
iks.Add(a, 0)
iks.Add(b, 1)
# process sliding window
for a, b in zip(stream_A, stream_B):
iks.Remove(sliding_A.popleft(), 0)
iks.Remove(sliding_B.popleft(), 1)
sliding_A.append(a)
sliding_B.append(b)
iks.Add(a, 0)
iks.Add(b, 1)
iks_statistics.append(iks.KS())
finish = time()
print(f'Elapsed time for IKS to process stream: {round(finish - start, 2)} sec')
######################
## TEST ks_2samp
######################
start = time()
ks_2samp_statistics = [] # gather all statistics generated by ks_2samp
sliding_A = deque(initial_A) # sliding window
sliding_B = deque(initial_B) # sliding window
for a, b in zip(stream_A, stream_B):
sliding_A.popleft()
sliding_B.popleft()
sliding_A.append(a)
sliding_B.append(b)
ks_2samp_statistics.append(ks_2samp(sliding_A, sliding_B).statistic)
finish = time()
print(f'Elapsed time for ks_2samp to process stream: {round(finish - start, 2)} sec')
max_diff = np.max(np.abs(np.array(iks_statistics) - np.array(ks_2samp_statistics)))
print(f'Maximum difference between IKS and ks_2samp: {max_diff}') | 1,724 | 24.746269 | 85 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Pure Python/IKS.py | from Treap import Treap
from math import log
class IKS:
def __init__(self):
self.treap = None
self.n = [0, 0]
@staticmethod
def KSThresholdForPValue(pvalue, N):
'''Threshold for KS Test given a p-value
Args:
pval (float): p-value.
N (int): the size of the samples.
Returns:
Threshold t to compare groups 0 and 1. The null-hypothesis is discarded if KS() > t.
'''
ca = (-0.5 * log(pvalue)) ** 0.5
return ca * (2.0 * N / N ** 2) ** 0.5
@staticmethod
def CAForPValue(pvalue):
'''ca for KS Test given a p-value
Args:
pval (float): p-value.
Returns:
The "ca" coefficient that can be used to compute a threshold for KS().
'''
return (-0.5 * log(pvalue)) ** 0.5
def KS(self):
'''Kolmogorov-Smirnov statistic. Both groups must have the same number of observations.
Returns:
The KS statistic D.
'''
assert(self.n[0] == self.n[1])
N = self.n[0]
if N == 0:
return 0
return max(self.treap.max_value, -self.treap.min_value) / N
def Kuiper(self):
'''Kuiper statistic. Both groups must have the same number of observations.
Returns:
The Kuiper statistic.
'''
assert(self.n[0] == self.n[1])
N = self.n[0]
if N == 0:
return 0
return (self.treap.max_value - self.treap.min_value) / N
def Add(self, obs, group):
'''Insert new observation into one of the groups.
Args:
obs: the value of the observation. Tip: a tuple (actual value, random value) is recommended when there is overlap between groups or if values are not guaranteed to be mostly unique.
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
group = 0 if group == 2 else group
assert(group == 0 or group == 1)
key = (obs, group)
self.n[group] += 1
left, left_g, right, val = None, None, None, None
left, right = Treap.SplitKeepRight(self.treap, key)
left, left_g = Treap.SplitGreatest(left)
val = 0 if left_g is None else left_g.value
left = Treap.Merge(left, left_g)
right = Treap.Merge(Treap(key, val), right)
Treap.SumAll(right, 1 if group == 0 else -1)
self.treap = Treap.Merge(left, right)
def Remove(self, obs, group):
'''Remove observation from one of the groups.
Args:
obs: the value of the observation. Must be identical to a previously inserted observation (including the random element of a tuple, if this was the case).
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
group = 0 if group == 2 else group
assert(group == 0 or group == 1)
key = (obs, group)
self.n[group] -= 1
left, right, right_l = None, None, None
left, right = Treap.SplitKeepRight(self.treap, key)
right_l, right = Treap.SplitSmallest(right)
if right_l is not None and right_l.key == key:
Treap.SumAll(right, -1 if group == 0 else 1)
else:
right = Treap.Merge(right_l, right)
self.treap = Treap.Merge(left, right)
def Test(self, ca = 1.95):
'''Test whether the reference and sliding window follow different probability distributions according to the KS test.
Args:
ca: ca is a parameter used to calculate the threshold for the Kolmogorov-Smirnov statistic. The default value corresponds to a p-value of 0.001. Use IKS.CAForPValue to obtain an appropriate ca.
Returns:
True if we **reject** the null-hypothesis that states that both windows have the same distribution. In other words, we can consider that the windows have now different distributions.
'''
ca = ca or 1.95
n = self.n[0]
return self.KS() > ca * (2 * n / n ** 2) ** 0.5
IKS.AddObservation = IKS.Add
IKS.RemoveObservation = IKS.Remove
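# A minimal usage sketch of the tuple trick recommended in the docstrings above
# (illustrative only, not part of the public API): observations are keyed as
# (value, random()) so duplicated values stay unique, and removal must reuse
# the exact same tuple.
def _example_add_remove(n=100):
    from random import random
    iks = IKS()
    keys = []
    for i in range(n):
        k0, k1 = (i * 0.01, random()), (i * 0.01, random())
        iks.Add(k0, 0)
        iks.Add(k1, 1)
        keys.append((k0, k1))
    d_full = iks.KS()
    k0, k1 = keys.pop()
    iks.Remove(k0, 0)
    iks.Remove(k1, 1)
    return d_full, iks.KS()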
| 3,778 | 29.475806 | 199 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Pure Python/testing_single_stream_rnd_factor.py | from scipy.stats import ks_2samp
from IKS import IKS
import numpy as np
from time import time
from itertools import chain
from random import random
from collections import deque
initial = np.random.normal(loc = 0, scale = 1, size = 500)
stream = list(chain(*[np.random.normal(loc = 1.0 * (i % 2), scale = 1, size = 500) for i in range(10)]))
######################
## TEST IKS
######################
start = time()
iks_statistics = [] # collect statistics generated by IKS
iks = IKS() # group 0 = reference; group 1 = sliding
sliding = deque()
for val in initial:
iks.Add((val, random()), 0)
wrnd = (val, random()) # we only need to keep RND component for values in the sliding window
iks.Add(wrnd, 1)
sliding.append(wrnd)
# process sliding window
for val in stream:
iks.Remove(sliding.popleft(), 1)
wrnd = (val, random())
iks.Add(wrnd, 1)
sliding.append(wrnd)
iks_statistics.append(iks.KS())
finish = time()
print(f'Elapsed time for IKS to process stream: {round(finish - start, 2)} sec')
######################
## TEST ks_2samp
######################
start = time()
ks_2samp_statistics = [] # collect statistics generated by ks_2samp
sliding = deque(initial) # sliding window
for val in stream:
sliding.popleft()
sliding.append(val)
ks_2samp_statistics.append(ks_2samp(initial, sliding).statistic)
finish = time()
print(f'Elapsed time for ks_2samp to process stream: {round(finish - start, 2)} sec')
max_diff = np.max(np.abs(np.array(iks_statistics) - np.array(ks_2samp_statistics)))
print(f'Maximum difference between IKS and ks_2samp: {max_diff}') | 1,592 | 24.285714 | 104 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Pure Python/testing_batch.py | from scipy.stats import ks_2samp
from IKS import IKS
import numpy as np
group_A = np.random.normal(loc = 0, scale = 1, size = 100)
group_B = np.random.normal(loc = 1, scale = 1, size = 100)
iks = IKS()
for x, y in zip(group_A, group_B):
iks.Add(x, 0)
iks.Add(y, 1)
print(iks.KS())
print(ks_2samp(group_A, group_B).statistic) | 333 | 19.875 | 58 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Pure Python/IKSSW.py | from IKS import IKS
from collections import deque
from random import random
class IKSSW:
def __init__(self, values):
'''Incremental Kolmogorov-Smirnov Sliding Window. This class assumes that one window is fixed (reference window) and another slides over a stream of data. The reference window can be updated to be the same as the current sliding window.
Args:
values: initial values for the reference and sliding windows.
'''
self.iks = IKS()
self.sw = deque()
self.reference = [(x, random()) for x in values]
for val in self.reference:
self.iks.AddObservation(val, 1)
for val in values:
wrnd = (val, random())
self.sw.append(wrnd)
self.iks.AddObservation(wrnd, 2)
def Increment(self, value):
'''Remove the oldest observation from the sliding window and replace it with a given value.
Args:
value: the new observation.
'''
self.iks.RemoveObservation(self.sw.popleft(), 2)
wrnd = (value, random())
self.iks.AddObservation(wrnd, 2)
self.sw.append(wrnd)
__call__ = Increment
def Kuiper(self):
'''Kuiper statistic. Both groups must have the same number of observations.
Returns:
The Kuiper statistic.
'''
return self.iks.Kuiper()
def KS(self):
'''Kolmogorov-Smirnov statistic. Both groups must have the same number of observations.
Returns:
The KS statistic D.
'''
return self.iks.KS()
def Update(self):
'''Updates the IKSSW. The reference window becomes the sliding window.
'''
for val in self.reference:
self.iks.Remove(val, 1)
self.reference.clear()
for x in self.sw:
self.reference.append((x[0], random()))
for val in self.reference:
self.iks.Add(val, 1)
def Test(self, ca = 1.95):
'''Test whether the reference and sliding window follow different probability distributions according to the KS test.
Args:
ca: ca is a parameter used to calculate the threshold for the Kolmogorov-Smirnov statistic. The default value corresponds to a p-value of 0.001. Use IKS.CAForPValue to obtain an appropriate ca.
Returns:
True if we **reject** the null-hypothesis that states that both windows have the same distribution. In other words, we can consider that the windows have now different distributions.
'''
return self.iks.Test(ca)
if __name__ == "__main__":
v = [random() for x in range(10)]
ikssw = IKSSW(v)
print(ikssw.KS(), ikssw.Kuiper(), ikssw.Test())
for i in range(10):
ikssw(random())
print(ikssw.KS(), ikssw.Kuiper(), ikssw.Test())
ikssw.Update()
print(ikssw.KS(), ikssw.Kuiper(), ikssw.Test())
| 2,664 | 29.988372 | 240 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Pure Python/Treap.py | from random import random
class Treap:
def __init__(self, key, value = 0):
self.key = key
self.value = value
self.priority = random()
self.size = 1
self.height = 1
self.lazy = 0
self.max_value = value
self.min_value = value
self.left = None
self.right = None
@staticmethod
def SumAll(node, value):
if node is None:
return
node.value += value
node.max_value += value
node.min_value += value
node.lazy += value
@classmethod
def Unlazy(cls, node):
cls.SumAll(node.left, node.lazy)
cls.SumAll(node.right, node.lazy)
node.lazy = 0
@classmethod
def Update(cls, node):
if node is None:
return
cls.Unlazy(node)
node.size = 1
node.height = 0
node.max_value = node.value
node.min_value = node.value
if node.left is not None:
node.size += node.left.size
node.height = node.left.height
node.max_value = max(node.max_value, node.left.max_value)
node.min_value = min(node.min_value, node.left.min_value)
if node.right is not None:
node.size += node.right.size
node.height = max(node.height, node.right.height)
node.max_value = max(node.max_value, node.right.max_value)
node.min_value = min(node.min_value, node.right.min_value)
node.height += 1
@classmethod
def SplitKeepRight(cls, node, key):
if node is None:
return None, None
left, right = None, None
cls.Unlazy(node)
if key <= node.key:
left, node.left = cls.SplitKeepRight(node.left, key)
right = node
else:
node.right, right = cls.SplitKeepRight(node.right, key)
left = node
cls.Update(left)
cls.Update(right)
return left, right
@classmethod
def Merge(cls, left, right):
if left is None:
return right
if right is None:
return left
node = None
if left.priority > right.priority:
cls.Unlazy(left)
left.right = cls.Merge(left.right, right)
node = left
else:
cls.Unlazy(right)
right.left = cls.Merge(left, right.left)
node = right
cls.Update(node)
return node
@classmethod
def SplitSmallest(cls, node):
if node is None:
return None, None
left, right = None, None
cls.Unlazy(node)
if node.left is not None:
left, node.left = cls.SplitSmallest(node.left)
right = node
else:
right = node.right
node.right = None
left = node
cls.Update(left)
cls.Update(right)
return left, right
@classmethod
def SplitGreatest(cls, node):
if node is None:
return None, None
cls.Unlazy(node)
if node.right is not None:
node.right, right = cls.SplitGreatest(node.right)
left = node
else:
left = node.left
node.left = None
right = node
cls.Update(left)
cls.Update(right)
return left, right
@staticmethod
def Size(node):
return 0 if node is None else node.size
@staticmethod
def Height(node):
return 0 if node is None else node.height
@classmethod
def _ToList(cls, node, extractor, _list = None):
if _list is None:
_list = []
if node is None:
return _list
cls.Unlazy(node)
cls._ToList(node.left, extractor, _list)
_list.append(extractor(node))
cls._ToList(node.right, extractor, _list)
return _list
@classmethod
def KeysToList(cls, node, _list = None):
extractor = lambda x: x.key
return cls._ToList(node, extractor, _list)
@classmethod
def ValuesToList(cls, node, _list = None):
extractor = lambda x: x.value
return cls._ToList(node, extractor, _list)
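# A minimal sketch of how IKS drives this structure (illustrative only): keys
# enter via split-and-merge, SumAll lazily shifts a whole subtree, and
# max_value/min_value keep the running extremes that IKS later turns into the
# KS and Kuiper statistics.
def _example_treap():
    root = None
    for key in [(0.3, 0), (0.1, 1), (0.7, 0), (0.5, 1)]:
        left, right = Treap.SplitKeepRight(root, key)
        root = Treap.Merge(Treap.Merge(left, Treap(key, 0)), right)
    Treap.SumAll(root, 1)
    return Treap.KeysToList(root), root.max_value, root.min_value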
| 3,699 | 21.02381 | 64 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Python C++ Wrapper/ForgettingBuffer.py | class Node:
def __init__(self, value):
self.value = value
self.next = None
class ForgettingBuffer:
def __init__(self, values):
self.first = None
self.last = None
for val in values:
if self.first == None:
self.first = Node(val)
self.last = self.first
else:
self.last.next = Node(val)
self.last = self.last.next
def __iter__(self):
cur = self.first
while cur != None:
yield cur.value
cur = cur.next
def Increment(self, value):
first_value = self.first.value
self.first = self.first.next
self.last.next = Node(value)
self.last = self.last.next
return first_value
Add = Increment
__call__ = Increment
def Values(self):
return list(self)
if __name__ == "__main__":
fb = ForgettingBuffer([1, 2, 3, 4, 5])
for val in fb:
print(val)
fb(10)
fb(11)
fb(12)
print(list(fb))
print(fb.Values())
| 930 | 18.808511 | 40 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Python C++ Wrapper/testing_parallel_streams.py | from scipy.stats import ks_2samp
from IKS import IKS
import numpy as np
from time import time
from collections import deque
initial_A = np.random.normal(loc = 0, scale = 1, size = 500)
initial_B = np.random.normal(loc = 1, scale = 1, size = 500)
stream_A = np.random.normal(loc = 0, scale = 1, size = 5000)
stream_B = np.random.normal(loc = 1, scale = 1, size = 5000)
######################
## TEST IKS
######################
start = time()
iks_statistics = [] # collect statistics generated by IKS
iks = IKS()
sliding_A = deque(initial_A) # sliding window
sliding_B = deque(initial_B) # sliding window
for a, b in zip(initial_A, initial_B):
iks.Add(a, 0)
iks.Add(b, 1)
# process sliding window
for a, b in zip(stream_A, stream_B):
iks.Remove(sliding_A.popleft(), 0)
iks.Remove(sliding_B.popleft(), 1)
sliding_A.append(a)
sliding_B.append(b)
iks.Add(a, 0)
iks.Add(b, 1)
iks_statistics.append(iks.KS())
finish = time()
print(f'Elapsed time for IKS to process stream: {round(finish - start, 2)} sec')
######################
## TEST ks_2samp
######################
start = time()
ks_2samp_statistics = [] # gather all statistics generated by ks_2samp
sliding_A = deque(initial_A) # sliding window
sliding_B = deque(initial_B) # sliding window
for a, b in zip(stream_A, stream_B):
sliding_A.popleft()
sliding_B.popleft()
sliding_A.append(a)
sliding_B.append(b)
ks_2samp_statistics.append(ks_2samp(sliding_A, sliding_B).statistic)
finish = time()
print(f'Elapsed time for ks_2samp to process stream: {round(finish - start, 2)} sec')
max_diff = np.max(np.abs(np.array(iks_statistics) - np.array(ks_2samp_statistics)))
print(f'Maximum difference between IKS and ks_2samp: {max_diff}') | 1,724 | 24.746269 | 85 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Python C++ Wrapper/IKS.py | from cffi import FFI
ffi = FFI()
ffi.cdef("""
typedef struct {
void * pointer;
} IKS_WrappedPointer;
IKS_WrappedPointer IKS_NewGeneratorWithSeed(unsigned seed);
IKS_WrappedPointer IKS_NewGenerator(void);
void IKS_DeleteGenerator(IKS_WrappedPointer pointer);
IKS_WrappedPointer IKS_NewIKS(IKS_WrappedPointer generatorPointer);
void IKS_DeleteIKS(IKS_WrappedPointer pointer);
int IKS_Test(IKS_WrappedPointer pointer, double ca);
double IKS_KS(IKS_WrappedPointer pointer);
double IKS_Kuiper(IKS_WrappedPointer pointer);
void IKS_AddObservation(IKS_WrappedPointer pointer, double obs, int which_sample);
void IKS_RemoveObservation(IKS_WrappedPointer pointer, double obs, int which_sample);
void IKS_AddCompositeObservation(IKS_WrappedPointer pointer, double obs, double obs_p2, int which_sample);
void IKS_RemoveCompositeObservation(IKS_WrappedPointer pointer, double obs, double obs_p2, int which_sample);
double IKS_KSThresholdForPValue(double pvalue, int N);
double IKS_CAForPValue(double pvalue);
""")
clib = ffi.dlopen("iks.dll")
class Generator:
def __init__(self, seed = None):
if seed == None:
self.wp = clib.IKS_NewGenerator()
else:
self.wp = clib.IKS_NewGeneratorWithSeed(seed)
def __del__(self):
clib.IKS_DeleteGenerator(self.wp)
global_generator = Generator()
class IKS:
def __init__(self, generator = global_generator):
self.wp = clib.IKS_NewIKS(generator.wp)
def __del__(self):
clib.IKS_DeleteIKS(self.wp)
def AddObservation(self, obs, sample):
'''Insert new observation into one of the groups.
Args:
obs: the value of the observation. Tip: a tuple (actual value, random value) is recommended when there is overlap between groups or if values are not guaranteed to be mostly unique.
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
if isinstance(obs, tuple):
clib.IKS_AddCompositeObservation(self.wp, obs[0], obs[1], sample)
else:
clib.IKS_AddObservation(self.wp, obs, sample)
def RemoveObservation(self, obs, sample):
'''Remove observation from one of the groups.
Args:
obs: the value of the observation. Must be identical to a previously inserted observation (including the random element of a tuple, if this was the case).
group (int): which group the observation belongs to. Must be either 0 or 1.
'''
if isinstance(obs, tuple):
clib.IKS_RemoveCompositeObservation(self.wp, obs[0], obs[1], sample)
else:
clib.IKS_RemoveObservation(self.wp, obs, sample)
def KS(self):
'''Kolmogorov-Smirnov statistic. Both groups must have the same number of observations.
Returns:
The KS statistic D.
'''
return clib.IKS_KS(self.wp)
def Kuiper(self):
'''Kuiper statistic. Both groups must have the same number of observations.
Returns:
The Kuiper statistic.
'''
return clib.IKS_Kuiper(self.wp)
def Test(self, ca = 1.95):
'''Test whether the reference and sliding window follow different probability distributions according to the KS test.
Args:
ca: ca is a parameter used to calculate the threshold for the Kolmogorov-Smirnov statistic. The default value corresponds to a p-value of 0.001. Use IKS.CAForPValue to obtain an appropriate ca.
Returns:
True if we **reject** the null-hypothesis that states that both windows have the same distribution. In other words, we can consider that the windows have now different distributions.
'''
return clib.IKS_Test(self.wp, ca) == 1
@staticmethod
def KSThresholdForPValue(pvalue, N):
'''Threshold for KS Test given a p-value
Args:
pval (float): p-value.
N (int): the size of the samples.
Returns:
Threshold t to compare groups 0 and 1. The null-hypothesis is discarded if KS() > t.
'''
return clib.IKS_KSThresholdForPValue(pvalue, N)
@staticmethod
def CAForPValue(pvalue):
'''ca for KS Test given a p-value
Args:
pval (float): p-value.
Returns:
The "ca" coefficient that can be used to compute a threshold for KS().
'''
return clib.IKS_CAForPValue(pvalue)
IKS.Add = IKS.AddObservation
IKS.Remove = IKS.RemoveObservation
if __name__ == "__main__":
import random
iks = IKS()
for i in range(0, 10):
iks.AddObservation(i, 0)
iks.AddObservation(i, 1)
print(iks.KS())
print(iks.Kuiper())
print(iks.Test())
iks = IKS()
for i in range(0, 10):
iks.AddObservation(random.random(), 0)
iks.AddObservation(random.random(), 1)
print(iks.KS())
print(iks.Kuiper())
print(iks.Test())
| 4,612 | 30.813793 | 199 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Python C++ Wrapper/testing_single_stream_rnd_factor.py | from scipy.stats import ks_2samp
from IKS import IKS
import numpy as np
from time import time
from itertools import chain
from random import random
from collections import deque
initial = np.random.normal(loc = 0, scale = 1, size = 500)
stream = list(chain(*[np.random.normal(loc = 1.0 * (i % 2), scale = 1, size = 500) for i in range(10)]))
######################
## TEST IKS
######################
start = time()
iks_statistics = [] # collect statistics generated by IKS
iks = IKS() # group 0 = reference; group 1 = sliding
sliding = deque()
for val in initial:
iks.Add((val, random()), 0)
wrnd = (val, random()) # we only need to keep RND component for values in the sliding window
iks.Add(wrnd, 1)
sliding.append(wrnd)
# process sliding window
for val in stream:
iks.Remove(sliding.popleft(), 1)
wrnd = (val, random())
iks.Add(wrnd, 1)
sliding.append(wrnd)
iks_statistics.append(iks.KS())
finish = time()
print(f'Elapsed time for IKS to process stream: {round(finish - start, 2)} sec')
######################
## TEST ks_2samp
######################
start = time()
ks_2samp_statistics = [] # collect statistics generated by ks_2samp
sliding = deque(initial) # sliding window
for val in stream:
sliding.popleft()
sliding.append(val)
ks_2samp_statistics.append(ks_2samp(initial, sliding).statistic)
finish = time()
print(f'Elapsed time for ks_2samp to process stream: {round(finish - start, 2)} sec')
max_diff = np.max(np.abs(np.array(iks_statistics) - np.array(ks_2samp_statistics)))
print(f'Maximum difference between IKS and ks_2samp: {max_diff}') | 1,592 | 24.285714 | 104 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Python C++ Wrapper/testing_batch.py | from scipy.stats import ks_2samp
from IKS import IKS
import numpy as np
group_A = np.random.normal(loc = 0, scale = 1, size = 100)
group_B = np.random.normal(loc = 1, scale = 1, size = 100)
iks = IKS()
for x, y in zip(group_A, group_B):
iks.Add(x, 0)
iks.Add(y, 1)
print(iks.KS())
print(ks_2samp(group_A, group_B).statistic) | 333 | 19.875 | 58 | py |
incremental-ks | incremental-ks-master/IncrementalKS/Python C++ Wrapper/IKSSW.py | from IKS import IKS
from collections import deque
from random import random
class IKSSW:
def __init__(self, values):
'''Incremental Kolmogorov-Smirnov Sliding Window. This class assumes that one window is fixed (reference window) and another slides over a stream of data. The reference window can be updated to be the same as the current sliding window.
Args:
values: initial values for the reference and sliding windows.
'''
self.iks = IKS()
self.sw = deque()
self.reference = [(x, random()) for x in values]
for val in self.reference:
self.iks.AddObservation(val, 1)
for val in values:
wrnd = (val, random())
self.sw.append(wrnd)
self.iks.AddObservation(wrnd, 2)
def Increment(self, value):
'''Remove the oldest observation from the sliding window and replace it with a given value.
Args:
value: the new observation.
'''
self.iks.RemoveObservation(self.sw.popleft(), 2)
wrnd = (value, random())
self.iks.AddObservation(wrnd, 2)
self.sw.append(wrnd)
__call__ = Increment
def Kuiper(self):
'''Kuiper statistic. Both groups must have the same number of observations.
Returns:
The Kuiper statistic.
'''
return self.iks.Kuiper()
def KS(self):
'''Kolmogorov-Smirnov statistic. Both groups must have the same number of observations.
Returns:
The KS statistic D.
'''
return self.iks.KS()
def Update(self):
'''Updates the IKSSW. The reference window becomes the sliding window.
'''
for val in self.reference:
self.iks.Remove(val, 1)
self.reference.clear()
for x in self.sw:
self.reference.append((x[0], random()))
for val in self.reference:
self.iks.Add(val, 1)
def Test(self, ca = 1.95):
'''Test whether the reference and sliding window follow different probability distributions according to the KS test.
Args:
ca: ca is a parameter used to calculate the threshold for the Kolmogorov-Smirnov statistic. The default value corresponds to a p-value of 0.001. Use IKS.CAForPValue to obtain an appropriate ca.
Returns:
True if we **reject** the null-hypothesis that states that both windows have the same distribution. In other words, we can consider that the windows have now different distributions.
'''
return self.iks.Test(ca)
if __name__ == "__main__":
v = [random() for x in range(10)]
ikssw = IKSSW(v)
print(ikssw.KS(), ikssw.Kuiper(), ikssw.Test())
for i in range(10):
ikssw(random())
print(ikssw.KS(), ikssw.Kuiper(), ikssw.Test())
ikssw.Update()
print(ikssw.KS(), ikssw.Kuiper(), ikssw.Test())
| 2,664 | 29.988372 | 240 | py |
pyterpol | pyterpol-master/grid_to_binary.py | #!/usr/bin/env python
import os
import argparse
import numpy as np
def main():
ps = argparse.ArgumentParser()
ps.add_argument('--remove', action='store_true', default=False, help='Removes ascii files.')
ps.add_argument('--overwrite', action='store_true', default=False, help='Overwrites binary files -- mandatory for every machine swap. ')
args = ps.parse_args()
print args
# get grids directory
for gdname in ['grids', 'grids_ABS']:
cwd = os.getcwd()
gdir = os.path.join(cwd, gdname)
dl = os.listdir(gdir)
# go through each grid directory
for direc in dl:
# path to directory
path = os.path.join(gdir, direc)
# directories only
if not os.path.isdir(path):
continue
# list of spectra
gl = os.path.join(path, 'gridlist')
# load the list
synlist = np.loadtxt(gl, dtype=str, unpack=True, usecols=[0])
# transform each spectrum to binary
for synspec in synlist:
# define name of the binary file
bin_synspec = synspec + '.npz'
if os.path.isfile(os.path.join(path, bin_synspec)) and not args.overwrite:
print "File: %s exists." % bin_synspec
if os.path.isfile(os.path.join(path, synspec)) and args.remove:
os.remove(os.path.join(path, synspec))
continue
# load the ascii spectrum and save it as binary file
w, i = np.loadtxt(os.path.join(path, synspec), unpack=True, usecols=[0, 1])
np.savez(os.path.join(path, bin_synspec), w, i)
if os.path.isfile(os.path.join(path, synspec)) and args.remove:
os.remove(os.path.join(path, synspec))
if __name__ == '__main__':
main()
| 1,998 | 34.696429 | 140 | py |
pyterpol | pyterpol-master/__init__.py | # reads basic classes
from .synthetic.makespectrum import SyntheticSpectrum
from .synthetic.makespectrum import SyntheticGrid
from .observed.observations import ObservedSpectrum
from .fitting.interface import ObservedList
from .fitting.interface import StarList
from .fitting.interface import RegionList
from .fitting.interface import Interface
from .fitting.parameter import Parameter
from .fitting.fitter import Fitter
from .synthetic.auxiliary import parlist_to_list
from .plotting.plotting import *
# setup default directories of the grid | 543 | 37.857143 | 53 | py |
pyterpol | pyterpol-master/fitting/fitter.py | import os
import nlopt
import emcee
# import warnings
import numpy as np
from scipy.optimize import fmin
from scipy.optimize import fmin_slsqp
try:
from scipy.optimize import differential_evolution
except ImportError as ex:
print ex
differential_evolution = None
from pyterpol.synthetic.auxiliary import parlist_to_list
from pyterpol.synthetic.auxiliary import string2bool
from pyterpol.synthetic.auxiliary import read_text_file
from pyterpol.synthetic.auxiliary import renew_file
fitters = dict(
sp_nelder_mead=dict(par0type='value',
optional_kwargs=['xtol', 'ftol', 'maxiter', 'maxfun'],
object=fmin,
uses_bounds=False,
info='Nelder-Mead simplex algorithm. '
'Implementation: http://docs.scipy.org/doc/scipy-0.16.1/reference/generated/'
'scipy.optimize.fmin.html#scipy.optimize.fmin. Ineffective for high-dimensional'
' parameter space.'),
sp_slsqp=dict(par0type='value',
optional_kwargs=['ftol'],
object=fmin_slsqp,
uses_bounds=True,
info='Sequential Least Squares Programming. '
'Implementation: http://docs.scipy.org/doc/scipy-0.16.1/reference/generated/'
'scipy.optimize.fmin.html#scipy.optimize.fmin. Ineffective for high-dimensional'
' parameter space.'),
sp_diff_evol=dict(par0type='limit',
optional_kwargs=['popsize', 'tol', 'strategy', 'maxiter'],
object=differential_evolution,
uses_bounds=False,
info='Differential evolution algorithm. '
'Implementation: http://docs.scipy.org/doc/scipy-0.16.1/reference/generated/'
'scipy.optimize.fmin.html#scipy.optimize.fmin.'),
nlopt_nelder_mead=dict(par0type='value',
optional_kwargs=['xtol', 'ftol', 'maxfun'],
object=None,
environment=nlopt.LN_NELDERMEAD,
uses_bounds=True,
info='Nelder-Mead Simplex. Implementation NLOPT: Steven G. Johnson, '
'The NLopt nonlinear-optimization package, http://ab-initio.mit.edu/nlopt.'),
nlopt_sbplx=dict(par0type='value',
optional_kwargs=['xtol', 'ftol', 'maxfun'],
object=None,
environment=nlopt.LN_SBPLX,
uses_bounds=True,
info='Sbplx - a variation of the Tom Rowans Subplex. '
'Implementation NLOPT: Steven G. Johnson, The NLopt '
'nonlinear-optimization package, http://ab-initio.mit.edu/nlopt.'),
)
class Fitter(object):
"""
"""
def __init__(self, name=None, fitparams=None, verbose=False, debug=False, fitlog='fit.log', **kwargs):
"""
:param name: name of the fitting environment
:param fitparams a list of Parameter types
:param verbose whether to save detailed chi_square information
:param debug: debugmode
:param fitlog: file in which the fitting is logged
:param kwargs: fitting environment control keywords
:return:
"""
# pass the parameters
if fitparams is None:
self.fitparams = []
else:
self.fitparams = fitparams
self.verbose = verbose
self.fitlog = fitlog
self.debug = debug
self.fittername = name
# empty parameters
self.fitter = None
self.fit_kwargs = {}
self.par0 = []
self.uses_bounds = False
self.family = None
self.vmins = None
self.vmaxs = None
self.nlopt_environment = None
# empty list of all trial fits
self.iters = []
self.parameter_identification = None
# iteration number
self.iter_number = 0
# choose a fitter if one
# was given
if name is not None:
self.choose_fitter(name, **kwargs)
def __call__(self, func, *args):
"""
:param func:
:param args:
:return:
"""
# empty the fitlog
renew_file(self.fitlog)
# reset the counter and clear the fitting
self.iter_number = 0
self.iters = []
# debug
if self.debug:
print "Started fitted with fitting environment: %s\n" \
" vector of parameters: %s and optional" \
" enviromental parameters: %s." % (self.fittername, str(self.par0), str(self.fit_kwargs))
if len(self.par0) == 0:
raise ValueError('No initial vector of parameters (wrapped in Parameter class) was passed.')
# check that initial parameters do not lie outside the fitted region.
self.check_initial_parameters()
# run fitting
if self.family == 'sp':
if self.uses_bounds:
bounds = [[vmin, vmax] for vmin, vmax in zip(self.vmins, self.vmaxs)]
self.result = self.fitter(func, self.par0, args=args, bounds=bounds, **self.fit_kwargs)
else:
self.result = self.fitter(func, self.par0, args=args, **self.fit_kwargs)
elif self.family == 'nlopt':
# define function for the nlopt fitter
def f(x, grad):
return func(x, *args)
# check that we are searching minimum
self.fitter.set_min_objective(f)
# the fitting
self.result = self.fitter.optimize(self.par0)
# we want only the set of parameters as the result
# very inelegant
if not isinstance(self.result, (list, tuple, type(np.array([])))):
self.result = self.result.x
def __str__(self):
"""
String representation of the class.
:return:
"""
string = ''
string += 'Fitter: %s optional_arguments: %s\n' % (self.fittername, str(self.fit_kwargs))
string += 'Initial parameters:'
for i, par in enumerate(self.fitparams):
string += "(%s, g.): (%s, %s); " % (par['name'], str(self.par0[i]), str(par['group']))
if (i + 1) % 5 == 0:
string += '\n'
string += '\n'
return string
def append_iteration(self, iter):
"""
Appends each iteration.
:param iter the iteration
:return:
"""
# TODO this function has to be improved.
self.iter_number += 1
# print iter
self.iters.append(iter)
# if the number of iterations exceeds a certain number
# they are written to a file
if self.iter_number % 1000 < 1:
self.flush_iters()
self.iters = []
def clear_all(self):
"""
:return:
"""
self.__init__()
def check_initial_parameters(self):
"""
Checks that initial parameters do not lie outside the fitted region.
:return:
"""
p0 = self.par0
for i, p in enumerate(self.fitparams):
# differential evolution uses interval as a p0, and
# this function tests only floats
if isinstance(p0[i], (list, tuple)):
continue
if (p0[i] > p['vmax']) | (p0[i] < p['vmin']):
raise ValueError('Parameter %s (group %i) lies outside the fitted regions! %f not in (%f, %f)' %
(p['name'], p['group'], p['value'], p['vmin'], p['vmax']))
def choose_fitter(self, name, fitparams=None, init_step=None, **kwargs):
"""
Selects a fitter from the list of available ones and
prepares the fitting variables.
:param name: name of the fitting environment
:param fitparams: list of fitted parameters, each wrapped within the Parameter class
:param kwargs: keyword arguments controlling the respective fitting environment
:return:
"""
# clear the class first
self.clear_all()
# check the input
if name.lower() not in fitters.keys():
raise ValueError('Fitter: %s is unknown. Registered fitters are:\n %s.' % (name, self.list_fitters()))
else:
self.fitter = fitters[name]['object']
self.fittername = name
for key in kwargs.keys():
if key not in fitters[name]['optional_kwargs']:
raise KeyError('The parameter: %s is not listed among '
'optional_kwargs for fitter: %s. The eligible '
'optional_kwargs are: %s' % (key, name, str(fitters[name]['optional_kwargs'])))
else:
self.fit_kwargs[key] = kwargs[key]
if self.debug:
print 'Choosing environment: %s\n' \
' environmental parameters: %s.' % (name, str(self.fit_kwargs))
# if we want to change the fitted parameters
if fitparams is None:
fitparams = self.fitparams
else:
self.fitparams = fitparams
# set up initial value
if fitters[name]['par0type'] == 'value':
self.par0 = parlist_to_list(fitparams, property='value')
if fitters[name]['par0type'] == 'limit':
vmins = parlist_to_list(fitparams, property='vmin')
vmaxs = parlist_to_list(fitparams, property='vmax')
self.par0 = [[vmin, vmax] for vmin, vmax in zip(vmins, vmaxs)]
if self.debug:
print 'Setting initial parameters: %s' % str(self.par0)
# checks that there are any fitting boundaries
if fitters[name]['uses_bounds']:
self.uses_bounds = True
self.vmins = parlist_to_list(fitparams, property='vmin')
self.vmaxs = parlist_to_list(fitparams, property='vmax')
else:
self.uses_bounds = False
# set up family
self.family = name.split('_')[0]
if self.family == 'nlopt':
self.nlopt_environment = fitters[name]['environment']
self.setup_nlopt(init_step=init_step)
def flush_iters(self, f=None):
"""
Flushes all records within self.iters to a file
:param f: filename
:return:
"""
if f is None:
f = self.fitlog
# create a block of lines
lines = []
# if the file is empty add header
# print os.path.getsize(self.fitlog)
if os.path.getsize(self.fitlog) == 0:
# construct the header
header = self.make_header()
lines.append(header)
for row in self.iters:
line = ''
# create a row of parameters + chi2
p = row['parameters']
d = np.zeros(len(p)+1)
d[:-1] = p
d[-1] = row['chi2']
for i in range(0, len(d)):
line += '%s ' % str(d[i])
line += '\n'
# append the row
lines.append(line)
# print line
# write the to a file
ofile = open(f, 'a')
ofile.writelines(lines)
ofile.close()
def run_mcmc(self, chi_square, chain_file, fitparams, nwalkers, niter, *args):
"""
:param chi_square
:param fitparams
:param nwalkers
:param niter
:param args
:return:
"""
def lnlike(pars, *args):
"""
Model probability.
:param pars:
:param args:
:return:
"""
return -0.5*chi_square(pars, *args)
# define the boundaries and the priors
def lnprior(pars):
"""
Prior probabilities i.e. boundaries.
:param pars:
:return:
"""
for p, vmin, vmax in zip(pars, self.vmins, self.vmaxs):
if (p < vmin) | (p > vmax):
return -np.inf
return 0.0
def lnprob(pars, *args):
"""
The full probability function.
:param pars:
:param args:
:return:
"""
lp = lnprior(pars)
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(pars, *args)
# get the dimensions
ndim = len(fitparams)
# initialize the sampler
pos = np.array([[wmin + (wmax - wmin) * np.random.rand() for wmin, wmax in zip(self.vmins, self.vmaxs)]
for i in range(nwalkers)])
# setup the sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=args)
# initialize the file - create the header
if self.parameter_identification is not None:
header = [self.make_header()]
else:
header = ['']
# write the header and close the file
ofile = open(chain_file, 'w')
ofile.writelines(header)
ofile.close()
# run the sampler
for result in sampler.sample(pos, iterations=niter, storechain=False):
position = result[0]
ofile = open(chain_file, 'a')
for k in range(position.shape[0]):
ofile.write("%d %s %f\n" % (k, " ".join(['%.12f' % i for i in position[k]]), result[1][k]))
ofile.close()
@staticmethod
def list_fitters():
"""
Lists all fitters.
:return: string : a list of all fitters.
"""
string = '\n'.rjust(100, '=')
for key in fitters.keys():
string += "Name: %s\n" % key
string += "Optional parameters: %s\n" % str(fitters[key]['optional_kwargs'])
string += "Uses boundaries: %s\n" % str(fitters[key]['uses_bounds'])
string += "Description: %s\n" % fitters[key]['info']
string += '\n'.rjust(100, '=')
return string
def load(self, f):
"""
Loads the text representation of the class from
a file f.
:param f
:return:
"""
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('FITTER') > -1:
data_start = i
break
# check that there are actually some data in the file
if data_start >= len(lines):
return False
# create the class
fitter = Fitter()
name = None
fit_kwargs = {}
# from here the file is actually being read
for i, l in enumerate(lines[data_start+1:]):
# once we reach FITTER again we end
if l.find('FITTER') > -1:
break
# split the line
d = l.split()
# print d
# save the name
if d[0].find('fitter:') > -1:
name = d[1]
# save the kwargs
elif d[0].find('fit_parameters:') > -1:
d = d[1:]
if len(d) < 2:
continue
fit_kwargs = {d[i].strip(':'): float(d[i+1]) for i in range(0, len(d), 2)}
# do the same for environmental keys
if d[0].find('env_keys:') > -1:
# the first string is just identification
d = d[1:]
# ensure correct types
recs = ['debug', 'verbose', 'fitlog']
cast_types = [string2bool, string2bool, str]
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
for k in cdict.keys():
if k in recs:
i = recs.index(k)
ctype = cast_types[i]
cdict[k] = ctype(cdict[k])
# assign the values
setattr(fitter, k, cdict[k])
# choose the fitter
if name != 'None':
fitter.choose_fitter(name, **fit_kwargs)
else:
return False
# finally assign everything to self
attrs = ['debug', 'fittername', 'verbose', 'fitlog', 'fit_kwargs']
for attr in attrs:
setattr(self, attr, getattr(fitter, attr))
# if we got here, we loaded the data
return True
def make_header(self):
"""
Creates the header for output file.
:return:
"""
header = ''
for key in self.parameter_identification.keys():
if key != 'value':
header += '# %s: ' % key
for rec in self.parameter_identification[key]:
header += '%s ' % str(rec)
header += '\n'
return header
def save(self, ofile):
"""
Saves the class. It should be retrievable from the file.
Since this class really cannot exist without the
interface, it really saves only the selected fitting
environment and fitted kwargs.
:param ofile:
:return:
"""
# Open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w+')
# row announcing the fitter
string = ' FITTER '.rjust(105, '#').ljust(200, '#') + '\n'
# name of the fitter
string += 'fitter: %s\n' % self.fittername
string += 'fit_parameters: '
# writes the fitting kwargs
for fkey in self.fit_kwargs:
string += '%s: %s ' % (fkey, str(self.fit_kwargs[fkey]))
string += '\n'
# writes environmental keys
enviromental_keys = ['debug', 'verbose', 'fitlog']
string += 'env_keys: '
for fkey in enviromental_keys:
string += "%s: %s " % (fkey, str(getattr(self, fkey)))
string += '\n'
string += ' FITTER '.rjust(105, '#').ljust(200, '#') + '\n'
# write the remaining parameters
ofile.writelines(string)
def setup_nlopt(self, init_step=None):
"""
Sets up the NLOPT fitter.
:return:
"""
if self.debug:
print "Setting up NLOPT minimizer."
# length of the fitted parameters
n = len(self.fitparams)
# configures the fitter
self.fitter = nlopt.opt(self.nlopt_environment, n)
# set up parameters for fitting termination
for key in self.fit_kwargs.keys():
if key == 'xtol':
self.fitter.set_xtol_rel(self.fit_kwargs[key])
if key == 'ftol':
self.fitter.set_ftol_rel(self.fit_kwargs[key])
if key == 'maxfun':
self.fitter.set_maxeval(self.fit_kwargs[key])
# setup boundaries
if self.uses_bounds:
self.fitter.set_lower_bounds(self.vmins)
self.fitter.set_upper_bounds(self.vmaxs)
# setup initial step, which can be either
# user-defined or default
if init_step is None:
stepsize = (np.array(self.vmaxs) - np.array(self.vmins)) / 4.
stepsize = stepsize.tolist()
else:
stepsize = init_step
self.fitter.set_initial_step(stepsize)
def set_lower_boundary(self, arr):
"""
Sets lower boundary.
:param arr:
:return:
"""
self.vmins = arr
def set_upper_boundary(self, arr):
"""
Sets upper boundary.
:param arr:
:return:
"""
self.vmaxs = arr
def set_fit_properties(self, pi):
"""
Sets identification of parameters i.e. names, groups and components
:param pi: dictionary with the records for each parameter
the order has to be the same as for the fitted parameters
:return:
"""
self.parameter_identification = pi
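# A minimal usage sketch (illustrative only; the parameter names, values and
# keyword arguments below are arbitrary examples, not package defaults): fitted
# quantities are wrapped in Parameter objects and the backend is selected by
# its key in the `fitters` registry, with kwargs drawn from its 'optional_kwargs'.
def _example_fitter():
    from pyterpol.fitting.parameter import Parameter
    pars = [Parameter(name='rv', value=10., vmin=-100., vmax=100., fitted=True, group=0),
            Parameter(name='vrot', value=50., vmin=0., vmax=300., fitted=True, group=0)]
    fit = Fitter(debug=False)
    fit.choose_fitter('nlopt_nelder_mead', fitparams=pars, xtol=1e-6, maxfun=1000)
    # fit(chi_square_function, *args) would then run the minimization
    return fit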
| 20,033 | 31.842623 | 114 | py |
pyterpol | pyterpol-master/fitting/parameter.py | # definition of parameters - here I add parameters which apply for implemented
# grids. To fit additional parameters, one will have to define along with
# addition of new grids, or here..
parameter_definitions=dict(
teff=dict(name='teff', value=10000., vmin=6000., vmax=50000., unit='K', fitted=False, group=0, typedef=(float)),
logg=dict(name='logg', value=3.5, vmin=0.0, vmax=5.0, unit='log(g.cm^-2)', fitted=False, group=0, typedef=(float)),
vrot=dict(name='vrot', value=0.0, vmin=0.0, vmax=500., unit='km.s^-1', fitted=False, group=0, typedef=(float)),
rv=dict(name='rv', value=0.0, vmin=-1000., vmax=1000., unit='km.s^-1', fitted=False, group=0, typedef=(float)),
lr=dict(name='lr', value=1.0, vmin=0.0, vmax=1.0, unit='relative', fitted=False, group=0, typedef=(float)),
z=dict(name='z', value=1.0, vmin=0.0, vmax=2.0, unit='Z_solar', fitted=False, group=0, typedef=(float)),
)
# parameter_definitions=dict(
# teff=dict(name='teff', value=10000., vmin=6000., vmax=50000., unit='K', fitted=False, group=None, typedef=float),
# logg=dict(name='logg', value=3.5, vmin=0.0, vmax=5.0, unit='log(g.cm^-2)', fitted=False, group=None, typedef=float),
# vrot=dict(name='vrot', value=0.0, vmin=0.0, vmax=500., unit='km.s^-1', fitted=False, group=None, typedef=float),
# rv=dict(name='rv', value=0.0, vmin=-1000., vmax=1000., unit='km.s^-1', fitted=False, group=None, typedef=float),
# lr=dict(name='lr', value=1.0, vmin=0.0, vmax=1.0, unit='relative', fitted=False, group=None, typedef=float),
# z=dict(name='z', value=1.0, vmin=0.0, vmax=2.0, unit='Z_solar', fitted=False, group=None, typedef=float),
# )
class Parameter(object):
"""
"""
def __init__(self, name=None, value=None, vmin=None, vmax=None, unit=None, fitted=None,
group=None, typedef=None, debug=False):
"""
:param name: name of the parameter
:param value: of the parameter
:param vmin: minimal value
:param vmax: maximal value
:param fitted: is optimized
:param group: group for which the parameter applies
:return: None
"""
# pass all arguments
self.name = name
self.value = value
self.vmin = vmin
self.vmax = vmax
self.unit = unit
self.fitted = fitted
self.group = group
self._typedef = typedef
if typedef:
for var in [value, vmin, vmax]:
self.check_type(var)
# define floats
self._float_attributes = ['value', 'vmin', 'vmax']
# define debug_mode
self.debug = debug
def __getitem__(self, item):
"""
:param item: desired attribute of the class
:return: value of the parameter
"""
if hasattr(self, item):
return getattr(self, item)
else:
raise AttributeError('The parameter %s has no attribute %s.' % (str(self), item))
def __setitem__(self, item, value):
"""
:param item: changed attribute
:param value: new value
:return:None
"""
if hasattr(self, item):
# if a type was passed, then check it every time it is changed
if item in self._float_attributes and self._typedef is not None:
self.check_type(value)
setattr(self, item, value)
else:
raise AttributeError('The parameter %s has no attribute %s.' % (str(self), item))
def __str__(self):
"""
:return: string representation of the class
"""
string = ''
for var in ['name', 'value', 'vmin', 'vmax', 'fitted', 'group', '_typedef']:
string += "%s: %s " % (var, str(getattr(self, var)))
string += '\n'
return string
def check_type(self, value):
"""
:param value: value to be checked
:return: bool whether the tested value has correct type
"""
if self._typedef is None:
raise ValueError('The type of parameter %s has not been set.' % str(self))
else:
if not isinstance(value, self._typedef):
raise TypeError('The passed value does not have the correct type.'
' Correct type is %s.' % str(self._typedef))
def get_range(self):
"""
Returns boundaries of the fitting.
:return: list, boundaries
"""
return [self['vmin'], self['vmax']]
def get_error(self, relative=None):
"""
:param relative: relative error desired by the user
:return: error - uncertainty of the parameter.
"""
# if relative error is not given, the uncertainty is
# taken from boundaries
if relative is None:
error = (self['vmax'] - self['vmin']) / 2.
# otherwise it is a fraction of the value
else:
error = relative * self['value']
return error
def set_empty(self):
"""
Converts attributes from None, to
something more reasonable for
cases when only name and value
were set.
"""
if self.group is None:
self.group = 0
if self.fitted is None:
self.fitted = False
if self.unit is None:
self.unit = 'not_defined'
if self.vmin is None:
self.vmin = -1e6
if self.vmax is None:
self.vmax = 1e6
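# A minimal usage sketch (illustrative only): parameters are usually built from
# the parameter_definitions table above and then adjusted through item access.
def _example_parameter():
    par = Parameter(**parameter_definitions['rv'])
    par['value'] = 15.3
    par['vmin'], par['vmax'] = -120., 120.
    par['fitted'] = True
    return par['name'], par.get_range(), par.get_error(relative=0.05)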
| 5,467 | 34.277419 | 122 | py |
pyterpol | pyterpol-master/fitting/__init__.py | 0 | 0 | 0 | py |
|
pyterpol | pyterpol-master/fitting/interface.py | # -*- coding: utf-8 -*-
import copy
import corner
# import sys
import warnings
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from pyterpol.synthetic.makespectrum import SyntheticGrid
from pyterpol.observed.observations import ObservedSpectrum
from pyterpol.fitting.parameter import Parameter
from pyterpol.fitting.parameter import parameter_definitions
from pyterpol.fitting.fitter import Fitter
from pyterpol.synthetic.auxiliary import generate_least_number
from pyterpol.synthetic.auxiliary import keys_to_lowercase
from pyterpol.synthetic.auxiliary import read_text_file
from pyterpol.synthetic.auxiliary import string2bool
from pyterpol.synthetic.auxiliary import sum_dict_keys
from pyterpol.synthetic.auxiliary import ZERO_TOLERANCE
from pyterpol.plotting.plotting import *
# repeat userwarnings
warnings.simplefilter('always', UserWarning)
class Interface(object):
"""
"""
def __init__(self, sl=None, rl=None, ol=None, fitter=None, debug=False,
adaptive_resolution=True, spectrum_by_spectrum=None,
log_iterations=False):
"""
:param sl: StarList type
:param rl: RegionList type
:param ol: ObservedList type
:param fitter
:param debug
:param adaptive_resolution - this (sounds better than it actually is)
just means that resolution of the grid is set to twice
the resolution of the spectrum with highest resolution
:return:
"""
# StarList is deepcopied by value, because
# it is adjusted by the Interface
if sl is not None:
self.sl = sl.copy()
else:
self.sl = None
# RegionList and the ObservedList are copied
# by reference
self.rl = rl
self.ol = ol
self.synthetics = {}
self.grids = {}
self.fitter = fitter
self.spectrum_by_spectrum = spectrum_by_spectrum
# debug mode
self.debug = debug
# define empty comparison list
self.comparisonList = None
# parameters that cannot be obtained through interpolation
self._not_given_by_grid = ['lr', 'rv', 'vrot']
# relation between rv_groups and regions
self.rel_rvgroup_region = {}
# properties of synthetic spectra
self._synthetic_spectrum_kwargs = dict(step=0.01, order=4, padding=20.)
# properties of grids
self._grid_kwargs = dict(mode='default', debug=debug)
# initialization of various boolean variables
self.grid_properties_passed = False
self.fit_is_running = False
self.adaptive_resolution = adaptive_resolution
self.log_iterations = log_iterations
# temporary variable for info on the fitted parameters
self.ident_fitted_pars = None
self.one4all = False
def __str__(self):
"""
String representation of the class
:return:
"""
string = ""
for attr, name in zip(['sl', 'rl', 'ol', 'fitter'], ['StarList', 'RegionList', 'ObservedList', 'Fitter']):
string += '%s%s\n' % (name[:len(name)/2].rjust(50, '='), name[len(name)/2:].ljust(50, '='))
string += str(getattr(self, attr))
string += ''.ljust(100, '=')
return string
def accept_fit(self):
"""
Propagates the fitting result to the class.
:return:
"""
# this should be done more carefully
final_pars = self.fitter.result
print "FINAL PARAMETERS:", final_pars
# list fitted parameters
fitparams = self.get_fitted_parameters()
# updates the parameters with the result
for i in range(0, len(final_pars)):
fitparams[i]['value'] = final_pars[i]
# update the fitter with new initial parameters
self.fitter.par0 = copy.deepcopy(final_pars)
def add_comparison(self, region=None, parameters={}, observed=None, groups={}):
"""
:param region the name of the corresponding region
:param parameters a dictionary of the parameters required for the synthetic
spectrum
:param observed the observed spectrum
:param groups
Add a record to the comparisonList
:return: None
"""
if self.debug:
print 'Setting comparison for region: %s \n groups: %s. \n parameters: %s' % \
(str(region), str(groups), str(parameters))
if self.comparisonList is None:
raise Exception('The comparisonList has not been defined yet. Use Inteface.ready_comparison for that.')
else:
# pass the regions
wmin = self.rl.mainList[region]['wmin']
wmax = self.rl.mainList[region]['wmax']
# try to read out the observed spectrum - everything
if observed is not None:
try:
ow, oi, oe = observed.get_spectrum(wmin, wmax)
except:
# if it does not work out..
ow = observed.get_spectrum(wmin, wmax)
oi = None
oe = None
self.comparisonList.append(dict(region=region,
parameters=parameters,
observed=observed,
groups=groups,
synthetic={x: None for x in parameters.keys()},
chi2=0.0,
wmin=wmin,
wmax=wmax,
wave=ow,
intens=oi,
error=oe
)
)
def clear_all(self):
"""
Clears the class.
:return:
"""
self.comparisonList = None
self.grids = {}
self.ol = None
self.rl = None
self.sl = None
self.fitter = None
self.synthetics = {}
self._grid_kwargs = {}
self._synthetic_spectrum_kwargs = {}
self.rel_rvgroup_region = {}
self.grid_properties_passed = False
self.ident_fitted_pars = None
def compute_chi2(self, pars=[], l=None, verbose=False):
"""
:param pars:
:param l
:param verbose
:return: chi square
"""
if l is None:
l = self.comparisonList
# accounts for cases when we just evaluate current chi^2
if len(pars) == 0:
pars = self.get_fitted_parameters(attribute='value')
# propagate the parameters to the
# parameterlist and update it
self.propagate_and_update_parameters(l, pars)
# reads out the chi_2 from individual spectra
chi2 = self.read_chi2_from_comparisons(l, verbose)
# if we are fitting we store the info on the parameters
if self.fit_is_running & self.log_iterations:
self.fitter.append_iteration(dict(parameters=copy.deepcopy(pars), chi2=chi2))
else:
self.fitter.iter_number += 1
# print every hundredth iteration
if self.debug:
print 'Computed model: %s chi2: %s' % (str(pars), str(chi2))
else:
if (self.fitter.iter_number+1) % 100 == 0:
print 'Computed model: %s chi2: %s' % (str(pars), str(chi2))
return chi2
def compute_chi2_treshold(self, l=None, alpha=0.67):
"""
Computes confidence level from normalized chi^2.
It is of course not correct, but what can be done,
when the model is evidently incorrect??
:param l the list of comparisons
:param alpha the confidence level for the chi-square interval
:return:
"""
# use in-built comparison list if
# no other was passed
if l is None:
l = self.comparisonList
# get the degrees of freedom
ddof = self.get_degrees_of_freedom(l)
# estimate confidence limits
chi2 = stats.chi2(ddof)
vmin, vmax = chi2.interval(alpha)
# now get the maximal value relative
# to the minimal - minimal value is
# what we get with the minimization
# ratio = vmax/vmin
diff = vmax-vmin
# return ratio
return diff
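    # Note (illustrative): the band above comes from scipy's
    #     vmin, vmax = stats.chi2(ddof).interval(alpha)
    # i.e. the central alpha-interval of a chi^2 distribution with ddof degrees
    # of freedom; its width grows roughly with sqrt(2 * ddof), so the returned
    # value is only a rough, model-dependent confidence band on the chi^2 minimum.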
def change_observed_list(self, ol):
"""
Removes the old observed list and adds a new one.
It also resets the group assignment between
regions and radial velocity groups. Each
observed spectrum should have an rv group
assigned. Otherwise the outcome might be
wrong.
:param ol:
:return:
"""
if self.ol is None:
warnings.warn('There was no ObservedList attached to the Interface. Correct?')
else:
self.ol.clear_all()
# attach new observed list
self.ol = ol
# reset the rv-group settings
self._setup_rv_groups()
def copy(self):
"""
Creates a copy of self.
:return:
"""
other = Interface()
for attr in ['ol', 'sl', 'rl', 'fitter', 'spectrum_by_spectrum',
'adaptive_resolution', 'debug', '_grid_kwargs',
'_synthetic_spectrum_kwargs']:
v = copy.deepcopy(getattr(self, attr))
setattr(other, attr, v)
return other
def choose_fitter(self, *args, **kwargs):
"""
Just a wrapper for the Fitter.choose_fitter method;
see parameter description there.
:param args:
:param kwargs:
:return:
"""
# fitter is rather simple, so if there is none set, we set an empty
# one
if self.fitter is None:
self.fitter = Fitter(debug=self.debug)
# select fitted parameters
if 'fitparams' not in kwargs.keys():
fitparams = self.get_fitted_parameters()
kwargs['fitparams'] = fitparams
self.fitter.choose_fitter(*args, **kwargs)
def draw_random_sample(self):
"""
Takes a random sample from the data. This random sample
contains the same number of observations as the original
one -- i.e. some observations repeat within the sample.
:return:
"""
# get number of observations
nobs = len(self.ol)
# take original spectra and groups
rv_groups = self.ol.observedSpectraList['group']['rv']
spectra = self.ol.observedSpectraList['spectrum']
# make random data sample
ind = np.sort(np.random.randint(nobs, size=nobs))
random_rv_groups = [rv_groups[i] for i in ind]
random_spectra = [spectra[i] for i in ind]
# reset group numbers
newobs = []
for i in range(0, len(random_spectra)):
newobs.append(dict(filename=random_spectra[i].filename,
error=random_spectra[i].global_error,
group=dict(rv=i),
hjd=random_spectra[i].hjd),
)
# create new list of observations
ol = ObservedList()
ol.add_observations(newobs)
# copy the starlist
sl_new = self.sl.copy()
for i, rndg in enumerate(random_rv_groups):
pars = self.sl.get_parameter(rv=rndg)
for c in self.sl.get_components():
sl_new.set_parameter(name='rv', component=c, group=i, value=pars[c][0].value)
# get regions
rl = self.rl
# create new Interface
itf = Interface(sl=sl_new, rl=rl, ol=ol)
# set attributes
setattr(itf, 'grids', self.grids)
setattr(itf, 'synthetics', self.synthetics)
setattr(itf, '_grid_kwargs', self._grid_kwargs)
setattr(itf, '_synthetic_spectrum_kwargs', self._synthetic_spectrum_kwargs)
setattr(itf, 'fitter', self.fitter)
setattr(itf, 'adaptive_resolution', self.adaptive_resolution)
setattr(itf, 'debug', self.debug)
# finalize
itf._setup_rv_groups()
itf.ready_comparisons()
itf.populate_comparisons()
return itf
@staticmethod
def extract_parameters(l, attr='value'):
"""
Converts a list of Parameter objects to a
dictionary.
:param l
:param attr
:return:
"""
params = {par['name']: par[attr] for par in l}
return params
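# Example (illustrative values only): a list of Parameters named 'teff' and
# 'logg' collapses into a plain dictionary, e.g.
#     extract_parameters(parlist)              -> {'teff': 10000.0, 'logg': 4.0}
#     extract_parameters(parlist, attr='vmin') -> {'teff': 9000.0, 'logg': 3.5}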
@staticmethod
def evaluate_mcmc(f=None, treshold=100):
"""
Returns best-fit values and errors estimated from the convergence.
:param f: mcmc log
:param treshold
:return:
"""
# read the fitlog
log, nwalkers, niter, npars = read_mc_chain(f)
# take only the data where the mcmc has burnt in
log['data'] = log['data'][nwalkers*treshold:,:]
# best result
minind = np.argmin(-log['data'][:, -1])
# outputlist of errors
errors = {}
# fill the dictionary with errors
for i in range(0, len(log['component'])):
# parameter component, group
p = log['name'][i]
c = log['component'][i]
g = log['group'][i]
if c not in errors.keys():
errors[c] = {}
if p not in errors[c].keys():
errors[c][p] = []
# get the error estimate
best = log['data'][minind, i]
lower = log['data'][:, i].min() - best
upper = log['data'][:, i].max() - best
gauss_mean = log['data'][:, i].mean()
gauss_sigma = log['data'][:, i].std(ddof=1)
# append the value
errors[c][p].append(dict(best=best, group=g, gauss_mean=gauss_mean,
gauss_sigma=gauss_sigma, lower=lower, upper=upper))
return errors
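# The returned structure is nested as errors[component][parameter] = [dict(...)],
# where each dict carries 'best', 'group', 'gauss_mean', 'gauss_sigma', 'lower'
# and 'upper'; 'lower' and 'upper' are offsets relative to the best-fit value.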
def get_comparisons(self, verbose=False, **kwargs):
"""
Narrows down the number of comparisons.
:param verbose return indices in the original list
:param kwargs parameters according to which the comparison list will be narrowed down
:return:
"""
# empty arrays for the output
clist = []
indices = []
# parameter keys
keys = kwargs.keys()
# go over each record within the list of comparisons
for i in range(0, len(self.comparisonList)):
# the keys that we test are somewhat heterogeneous
# this construction is not pretty.
include = True
for key in keys:
# print key
# check whether the key lies among the groups
if key in self.comparisonList[i]['groups'].keys() \
and (kwargs[key] != self.comparisonList[i]['groups'][key]):
include = False
break
if hasattr(self.comparisonList[i]['observed'], key) and \
        getattr(self.comparisonList[i]['observed'], key) != kwargs[key]:
include = False
break
if key == 'region' and self.comparisonList[i]['region'] != kwargs[key]:
include = False
break
# if it survived all tests it is included
if include:
clist.append(self.comparisonList[i])
indices.append(i)
# if we want to get indices of the found in the original array
if verbose:
return clist, indices
else:
return clist
def get_defined_groups(self, component=None, parameter=None):
"""
Returns a dictionary of defined groups
:param component:
:param parameter:
:return:
"""
return self.sl.get_defined_groups(component=component, parameter=parameter)
def get_degrees_of_freedom(self, l=None):
"""
Computes degrees of freedom for a given comparison list
:param l:
:return: number of degrees of freedom
"""
if l is None:
l = self.comparisonList
# number of fitted parameters
m = len(self.get_fitted_parameters())
n = 0
# number of fitted spectra points
for rec in l:
for c in rec['synthetic'].keys():
n += len(rec['synthetic'][c])
return n-m
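# In short: ddof = (number of synthetic data points summed over all comparisons
# and components) - (number of fitted parameters).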
def get_fitted_parameters(self, attribute=None):
"""
lists all fitted Parameters or a list of one
of their attributes
:param attribute:
:return:
"""
# return the list of Parameters
if attribute is None:
return self.sl.get_fitted_parameters()
else:
return [par[attribute] for par in self.sl.get_fitted_parameters()]
def get_observed_spectra_number(self):
"""
:return:
"""
if self.ol is not None:
return len(self.ol)
else:
return 0
def get_observed_spectrum(self, filename=None):
"""
Returns an observed spectrum according to its name.
:param filename name of the queried spectrum
:return:
"""
return self.ol.get_spectra(filename=filename)[0]
def list_comparisons(self, l=None):
"""
This function displays all comparisons.
:param l list of comparisons
:return: string
"""
if l is None:
l = self.comparisonList
string = ''
for i, rec in enumerate(l):
string += "========================= Comparison %s =========================\n" % str(i).zfill(3)
reg = rec['region']
# list region
string += 'region: %s:(%s,%s)\n' % (reg, str(self.rl.mainList[reg]['wmin']),
str(self.rl.mainList[reg]['wmax']))
# list observed spectrum
if rec['observed'] is not None:
string += "observed: %s\n" % rec['observed'].filename
else:
string += "observed: NONE\n"
# lists all parameters
for c in rec['parameters'].keys():
string += 'component: %s ' % c
# print rec['parameters'][c]
for par in rec['parameters'][c]:
string += "%s: %s " % (par['name'], str(par['value']))
string += '\n'
# list all groups
string += 'groups: %s\n' % str(rec['groups'])
string += 'chi2: %s\n' % str(rec['chi2'])
string += "==================================================================\n"
return string
def list_fitters(self):
"""
Lists all available fitters.
:return:
"""
if self.fitter is not None:
return self.fitter.list_fitters()
else:
raise AttributeError('No fitter has been attached yet.')
@staticmethod
def load(f, one4all=False):
"""
Loads the Interface from a file created with the save method.
:param f: the loaded file
:return:
"""
# first load the interface
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('INTERFACE') > -1:
data_start = i
break
# check that there are actually some data in the file
# the algorithm failed to load the class
if data_start >= len(lines):
warnings.warn('No interface was found.')
return False
# dictionary for the Interface attributes
ddicts = {}
for l in lines[1:]:
d = l.split()
# once we reach the INTERFACE marker again, we end
if l.find('INTERFACE') > -1:
break
# define record names and types
dnames = dict(
grid_parameters=['mode'],
synthetic_spectra_parameters=['order', 'step', 'padding'],
env_keys=['debug', 'adaptive_resolution']
)
dtypes = dict(
grid_parameters=[str],
synthetic_spectra_parameters=[int, float, float],
env_keys=[string2bool, string2bool]
)
# load all keys - env_vars, grid and synthetic spectra parameters
for dname in dnames.keys():
if d[0].find(dname) > -1:
# print d[0]
p = dnames[dname]
pt = dtypes[dname]
ddict = {d[i].strip(':'): d[i+1] for i in range(1, len(d), 2)}
# cast the variables to correct type
for k in ddict.keys():
i = p.index(k)
ddict[k] = pt[i](ddict[k])
# print ddict
ddicts[dname] = ddict
# print ddicts
# load the remaining data
rl = RegionList()
# print rl.load(f)
if not rl.load(f):
raise ValueError('No records on the RegionList were found in %s.' % f)
sl = StarList()
if not sl.load(f):
raise ValueError('No records on the StarList were found in %s.' % f)
fitter = Fitter()
if not fitter.load(f):
warnings.warn('No fitter was found in file %s' % f)
fitter = None
ol = ObservedList()
if not ol.load(f):
warnings.warn('No ObservedList was found in file %s' % f)
ol = None
# print ddicts
# print fitter
# setup the interface
itf = Interface(sl=sl, ol=ol, rl=rl, fitter=fitter, **ddicts['env_keys'])
itf.set_one_for_all(one4all)
gpars = {}
# print ddicts
# merge grid and synthetic spectra parameters
for d in [ddicts['synthetic_spectra_parameters'], ddicts['grid_parameters']]:
for k in d.keys():
gpars[k] = d[k]
itf.set_grid_properties(**gpars)
itf.setup()
itf.populate_comparisons()
# self.choose_fitter(self.fitter.fittername)
# if we got here, we loaded the data
return itf
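# Usage sketch (hypothetical file name):
#     itf = Interface.load('fit_result.sav')
# The returned Interface is already set up and populated, so it can be
# inspected or refitted right away.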
def populate_comparisons(self, l=None, demand_errors=False):
"""
Creates a synthetic spectrum for every record in
the comparisonList.
:param l
:param demand_errors
:return:
"""
if l is None:
l = self.comparisonList
# go over ech comparison in the list
for rec in l:
# get the region
region = rec['region']
# get the intensity and error
error = rec['error']
intens = rec['intens']
# go over each component
for c in rec['parameters'].keys():
pars = self.extract_parameters(rec['parameters'][c])
# use only those parameters that are not constrained with the grid
pars = {x: pars[x] for x in pars.keys() if x in self._not_given_by_grid}
# populate with the intensity vector of each component
if rec['observed'] is not None:
if demand_errors and rec['error'] is None:
raise ValueError('It is not allowed to call chi-square without having'
' uncertainties set.')
# extract the wavelength
wave = rec['wave']
# get the instrumental broadening
fwhm = rec['observed'].get_instrumental_width()
# define korelmode
korelmode = rec['observed'].korel
# generate the synthetic spectrum
rec['synthetic'][c] = self.synthetics[region][c].get_spectrum(wave=wave,
only_intensity=True,
korel=korelmode,
fwhm=fwhm,
**pars)
else:
wmin = rec['wmin']
wmax = rec['wmax']
error = None
korelmode = False
rec['synthetic'][c] = self.synthetics[region][c].get_spectrum(wmin=wmin,
wmax=wmax,
only_intensity=True,
korel=korelmode,
**pars)
# it is mandatory to provide errors for
# computation of the chi2
if error is not None:
# sum component spectra
for i, c in enumerate(rec['synthetic'].keys()):
if i == 0:
syn = rec['synthetic'][c].copy()
else:
syn = syn + rec['synthetic'][c]
# setup the chi2
rec['chi2'] = np.sum(((intens - syn) / error) ** 2)
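# When errors are available, the chi-square stored for each comparison is the
# usual chi2 = sum_i ((O_i - S_i) / sigma_i)**2, where S_i is the sum of the
# synthetic spectra of all components.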
def optimize_rv(self, fitter_name=None, groups=None, **fitter_kwargs):
"""
Optimizes radial velocities spectrum by spectrum.
:return:
"""
# turn off fitting of all parameters
for p in self.sl.get_parameter_types():
self.set_parameter(parname=p, fitted=False)
# if not defined, get rv groups
if groups is None:
groups = self.get_defined_groups(parameter='rv')
groups_list = []
for c in groups.keys():
groups_list.extend(groups[c]['rv'])
# rename back and make unique
groups = np.unique(groups_list)
# choose fitter
if fitter_name is not None:
self.choose_fitter(fitter_name, **fitter_kwargs)
# iterate over groups
for g in groups:
self.set_parameter(parname='rv', group=g, fitted=True)
l = self.get_comparisons(rv=g)
self.run_fit(l=l)
self.set_parameter(parname='rv', group=g, fitted=False)
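# Usage sketch (hypothetical fitter name and options):
#     itf.optimize_rv(fitter_name='nlopt_nelder_mead', ftol=1e-6)
# All parameters are temporarily frozen except 'rv', which is fitted group by
# group using only the comparisons belonging to that group.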
def plot_all_comparisons(self, l=None, savefig=False, figname=None):
"""
Creates a plot of all setup comparisons.
:param l
:param savefig
:param figname
:return: None
"""
if figname is not None:
savefig = True
if l is None:
l = self.comparisonList
if len(l) == 0:
raise ValueError('The comparison list is empty. Did you run interface.setup() and interface.populate()?')
for i in range(0, len(l)):
self.plot_comparison_by_index(i, l=l, savefig=savefig, figname=figname)
def plot_comparison_by_index(self, index, l=None, savefig=False, figname=None):
"""
:param index
:param l
:param savefig
:param figname
:return:
"""
# the comparison
if l is None:
cpr = self.comparisonList[index]
else:
cpr = l[index]
# boundaries
reg = cpr['region']
wmin = self.rl.mainList[reg]['wmin']
wmax = self.rl.mainList[reg]['wmax']
# merge the spectra
if any([cpr['synthetic'][key] is None for key in cpr['synthetic'].keys()]):
raise ValueError('The synthetic spectra are not computed. Did you run Interface.populate_comparisons()?')
si = sum_dict_keys(cpr['synthetic'])
# names
if cpr['observed'] is not None:
obsname = cpr['observed'].filename.split('/')[-1]
else:
obsname = 'NONE'
synname = ''
for c in cpr['parameters']:
synname += 'Component: %s ' % c
pdict = self.extract_parameters(cpr['parameters'][c])
synname += str({k: "%.4f" % pdict[k] for k in pdict.keys()}) + '\n'
if cpr['observed'] is not None:
try:
w, oi, ei = cpr['observed'].get_spectrum(wmin, wmax)
except:
w, oi, = cpr['observed'].get_spectrum(wmin, wmax)
ei = np.zeros(len(w))
warnings.warn('Your observed spectrum: %s has no errors attached!' % obsname)
else:
w = np.linspace(wmin, wmax, len(si))
if figname is None:
figname = "_".join([obsname, 'wmin', str(int(wmin)), 'wmax', str(int(wmax))]) + '.png'
else:
figname = "_".join([figname, obsname, 'wmin', str(int(wmin)), 'wmax', str(int(wmax))]) + '.png'
savefig = True
if self.debug:
print "Plotting comparison: observed: %s" % obsname
print "Plotting comparison: synthetics: %s" % synname
# do the plot
fig = plt.figure(figsize=(16, 10), dpi=100)
ax = fig.add_subplot(211)
if cpr['observed'] is not None:
ax.errorbar(w, oi, yerr=ei, fmt='-', color='k', label=obsname)
ax.plot(w, si, 'r-', label=synname)
ax.set_xlim(wmin, wmax)
ax.set_ylim(0.95*si.min(), 1.05*si.max())
ax.set_xlabel(r'$\lambda(\AA)$')
ax.set_ylabel(r'$F_{\lambda}$(rel.)')
ax.legend(fontsize=8, fancybox=True, shadow=True, bbox_to_anchor=(1.0, 1.2))
if cpr['observed'] is not None:
ax = fig.add_subplot(212)
resid = oi-si
ax.plot(w, resid, 'y', label='residuals')
ax.set_xlabel(r'$\lambda(\AA)$')
ax.set_ylabel(r'$F_{\lambda}$(rel.)')
ax.set_xlim(wmin, wmax)
ax.set_ylim(0.95*resid.min(), 1.05*resid.max())
ax.legend(fontsize=8, loc=3)
# save the figure
if savefig:
plt.savefig(figname)
plt.close(fig)
def plot_convergence(self, f=None, parameter='chi2', component='all', group='all', savefig=True, figname=None):
"""
Plots convergence of the chi2 and parameters.
:param f
:param parameter
:param component
:param group
:param savefig
:param figname
:return:
"""
if f is None:
f = self.fitter.fitlog
if figname is not None:
savefig = True
# read the log
log = read_fitlog(f)
block = []
labels = []
# set the plotted parameters
if parameter.lower() == 'all':
parameters = np.unique(log['name'])
else:
parameters = [parameter]
if component.lower() == 'all':
components = np.unique(log['component'])
else:
components = [component]
if group.lower() == 'all':
groups = np.unique(log['group'])
else:
groups = [group]
# select those matching the choice
i = 0
for p, c, g in zip(log['name'], log['component'], log['group']):
if p not in parameters:
i += 1
continue
elif c not in components:
i += 1
continue
elif g not in groups:
i += 1
continue
else:
label = '_'.join(['p', p, 'c', c, 'g', str(g)])
labels.append(label)
block.append(log['data'][:, i])
i += 1
# append chi_square
if (parameter.lower() in ['chi2']) | (parameter == 'all'):
block.append(log['data'][:, -1])
labels.append('chi2')
# print labels
plot_convergence(np.column_stack(block), labels, figname=figname, savefig=savefig)
@staticmethod
def plot_convergence_mcmc(f='chain.dat', parameters='all', components='all', groups='all',
savefig=True, figname=None):
"""
Plots convergence of a mcmc_chain
:param f:
:param parameters:
:param components:
:param groups:
:param savefig:
:param figname:
:return:
"""
# load data
log, nwalkers, niter, npars = read_mc_chain(f)
# set the plotted parameters
if parameters == 'all':
parameters = np.unique(log['name'])
if components == 'all':
components = np.unique(log['component'])
if groups == 'all':
groups = np.unique(log['group'])
if any([isinstance(x, (float, int, str)) for x in [components, parameters, groups]]):
raise TypeError('Parameters (parameter, component, group) have to be either type list'
' or string == \'all\'.')
# an array for empty indices.
indices = []
labels = []
i = 0
# fill the array of indices
for p, c, g in zip(log['name'], log['component'], log['group']):
    # do only the desired ones
    saveind = True
    for v, vals in zip([p, c, g], [parameters, components, groups]):
        if v not in vals:
            saveind = False
            break
    if saveind:
        indices.append(i)
        labels.append('_'.join(['c', c, 'p', p, 'g', str(g)]))
    i += 1
# do the plot
# print len(indices), len(labels)
plot_walkers(log['data'], niter, nwalkers, indices=indices,
labels=labels, savefig=savefig, figname=figname)
@staticmethod
def plot_covariances_mcmc(f='chain.dat', l=None, treshold=100, parameters=None,
components=None, groups=None, nbin=20, savefig=True, figname=None):
"""
Plots covariances between selected parameters
:param f
:param l
:param treshold
:param parameters
:param components
:param groups
:param nbin
:param savefig
:param figname
:return:
"""
if figname is not None:
savefig = True
# reads the chain
log, nwalkers, niter, npars = read_mc_chain(f)
# set the plotted parameters
if parameters is None:
parameters = np.unique(log['name'])
if components is None:
components = np.unique(log['component'])
if groups is None:
groups = np.unique(log['group'])
if any([isinstance(x, (float, int, str)) for x in [components, parameters, groups]]):
raise TypeError('Parameters (parameter, component, group) have to be either type list'
' or string == \'all\'.')
# take only the part, where the sampler is burnt in
log['data'] = log['data'][nwalkers*treshold:,:]
# select those matching the choice
indices = []
labels = []
i = 0
# fill the array of indices
for p, c, g in zip(log['name'], log['component'], log['group']):
# do only the desired ones
saveind = True
for v, vals in zip([p, c, g], [parameters, components, groups]):
if v not in vals:
saveind = False
break
if saveind:
indices.append(i)
labels.append('_'.join(['c', c, 'p', p, 'g', str(g)]))
i += 1
# do the corner plot
corner.corner(log['data'][:,indices], bins=nbin, labels=labels,
quantiles=(0.67*np.ones(len(indices))).tolist(),
truths=(np.zeros(len(indices))).tolist()
)
# save the figure
if savefig:
if figname is None:
figname = 'correlations.png'
plt.savefig(figname)
@staticmethod
def plot_variances_mcmc(f=None, l=None, parameters=None, components=None, groups=None, nbin=20,
treshold=100, savefig=True, figname=None):
"""
Plots covariances between selected parameters
:param f
:param l
:param treshold
:param parameters
:param components
:param groups
:param nbin
:param savefig
:param figname
:return:
"""
if any([isinstance(x, (float, int, str)) for x in [components, parameters, groups]]):
raise TypeError('Parameters (parameter, component, group) have to be either type list'
' or string == \'all\'.')
if figname is not None:
savefig = True
# reads the chain
log, nwalkers, niter, npars = read_mc_chain(f)
# set the plotted parameters
if parameters is None:
parameters = np.unique(log['name'])
if components is None:
components = np.unique(log['component'])
if groups is None:
groups = np.unique(log['group'])
# take only the part, where the sampler is burnt in
log['data'] = log['data'][nwalkers*treshold:,:]
# select those matching the choice
npar = len(log['name'])
for i in range(1, npar):
for j in range(0, i):
# extract individual values
p1 = log['name'][i]
c1 = log['component'][i]
g1 = log['group'][i]
# end if there are no components matching our
# choice of components, groups and parameters
if any([p.lower() not in parameters for p in [p1]]):
continue
if any([c.lower() not in components for c in [c1]]):
continue
if any([g not in groups for g in [g1]]):
continue
# setup labels
label1 = '_'.join(['p', p1, 'c', c1, 'g', str(g1).zfill(2)])
# setup plotted data
x = log['data'][:, i]
# do the plot
plot_variance(x, nbin=nbin, label=label1, savefig=savefig, figname=figname)
def propagate_and_update_parameters(self, l, pars):
"""
:param l
:param pars
:return:
"""
# parameters are passed by reference, so
# this should also change the starlist
# and corresponding
fitpars = self.sl.get_fitted_parameters()
if len(pars) != len(fitpars):
raise ValueError('Length of the vector passed with the fitting environment does '
'not match length of the parameters marked as fitted.')
for i, v in enumerate(pars):
fitpars[i]['value'] = v
# we have to recompute the synthetic spectra
# if one grid parameter was passed
# first check for which parameters
# the grid parameters are fitted
components_to_update = []
for c in self.sl.fitted_types.keys():
for rec in self.sl.fitted_types[c]:
# recompute only those components for those
# grid parameter is fitted
if rec not in self._not_given_by_grid:
components_to_update.append(c)
# update the synthetic spectra
if len(components_to_update) > 0:
self.ready_synthetic_spectra(complist=components_to_update)
# populate the comparison
self.populate_comparisons(l=l, demand_errors=True)
def ready_synthetic_spectra(self, complist=[]):
"""
Readies the synthetic spectra for each region.
:param complist list of components that will be re-computed,
:return:
"""
# if there is no list of components
# for which to set the synthetic
# parameters
if len(complist) == 0:
complist = self.sl.get_components()
# regime in which we use one long spectrum
if self.one4all:
wl = self.rl.get_wavelengths()
wmin = np.min(wl)
wmax = np.max(wl)
for reg in self.rl._registered_regions:
# add the region to synthetics
if reg not in self.synthetics.keys():
self.synthetics[reg] = dict()
# wavelength_boundaries
if not self.one4all:
wmin = self.rl.mainList[reg]['wmin']
wmax = self.rl.mainList[reg]['wmax']
# get all parameters for a given region
reg_groups = self.rl.mainList[reg]['groups'][0]
reg_groups = {x: reg_groups[x] for x in reg_groups.keys()
if x not in self._not_given_by_grid}
grid_pars = [x for x in self.sl.get_physical_parameters()
if x not in self._not_given_by_grid]
# setup default groups - ie zero
for par in grid_pars:
if par not in reg_groups.keys():
reg_groups[par] = 0
# get list of Parameters
parlist = self.sl.get_parameter(**reg_groups)
for c in complist:
# convert Parameter list to dictionary
params = self.extract_parameters(parlist[c])
# print params
# padding has to be relatively large, since
# we do not know what the rvs will be
if self.debug:
print "Creating SyntheticSpectrum: params: %s wmin: %s wmax: %s" % (str(params),
str(wmin),
str(wmax))
if not self.one4all:
self.synthetics[reg][c] = self.grids[reg].get_synthetic_spectrum(params,
np.array([wmin, wmax]),
**self._synthetic_spectrum_kwargs)
else:
self.synthetics[reg][c] = self.grids['all'].get_synthetic_spectrum(params,
np.array([wmin, wmax]),
**self._synthetic_spectrum_kwargs)
def read_chi2_from_comparisons(self, l=None, verbose=False):
"""
Reads the chi-squares from the list.
:param l:
:return:
"""
# work with the main comparisonList if no other
# is provided
if l is None:
l = self.comparisonList
chi2 = 0.0
if verbose:
chi2_detailed = []
# read out the chi squares
for i in range(0, len(l)):
chi2 += l[i]['chi2']
# if verbosity is desired a detailed chi-square
# info on each region is returned
if verbose:
chi2_detailed.append(dict(chi2=l[i]['chi2'],
region=self.rl.mainList[l[i]['region']],
rv_group=l[i]['groups']['rv']))
if verbose:
return chi2, chi2_detailed
else:
return chi2
def ready_comparisons(self):
"""
This function creates the comparison list, which is one of the
cornerstones of the class. It creates a list of all
combinations of the parameters.
:return:
"""
# start a list of comparisons that will
# be carried out with the given dataset
self.comparisonList = []
# go region by region
for reg in self.rl.mainList.keys():
# fitted region
wmin = self.rl.mainList[reg]['wmin']
wmax = self.rl.mainList[reg]['wmax']
# region-defined groups and parameters
reg_groups = copy.deepcopy(self.rl.mainList[reg]['groups'][0])
phys_pars = [x for x in self.sl.get_physical_parameters() if x not in ['rv']]
# print reg, phys_pars, reg_groups
# if the group is not defined, it is zero
for par in phys_pars:
if par not in reg_groups.keys():
reg_groups[par] = 0
# create a list of unique rv groups
rv_groups = self.sl.get_defined_groups(parameter='rv')
rv_groups = [rv_groups[key]['rv'] for key in rv_groups.keys()]
temp = []
for row in rv_groups:
temp.extend(row)
rv_groups = np.unique(temp)
for rv_group in rv_groups:
# append rv_group to groups
all_groups = copy.deepcopy(reg_groups)
all_groups['rv'] = rv_group
# append rv parameter to the remaining parameters
# rv_pars = self.sl.get_parameter(rv=rv_group)
# get unique set of parameters for a given group
all_pars = self.sl.get_parameter(**all_groups)
# for c in rv_pars.keys():
# all_pars[c].extend(rv_pars[c])
if self.ol is not None:
if rv_group not in self.rel_rvgroup_region[reg]:
continue
# the wmin wmax is used to check again that
# we are in the correct region.
if self.debug:
print "Queried parameters in ready comparisons:", wmin, wmax, rv_group
obs = self.ol.get_spectra(wmin=wmin, wmax=wmax, rv=rv_group)
if len(obs) == 0:
continue
else:
obs = [None]
# add the comparison for each observed spectrum
# because in an unlikely event, when we fit the
# same RVs for several spectra
for o in obs:
# What if we are only generating spectra???
# If there are spectra attached we are
# comparing and that's it!
if o is None:
c = 'all'
else:
c = o.component
if c != 'all':
temp_all_pars = {c: all_pars[c]}
else:
temp_all_pars = all_pars
self.add_comparison(region=reg,
parameters=temp_all_pars,
groups=all_groups,
observed=o,
)
def ready_comparisons_spectrum_by_spectrum(self):
"""
This function creates the comparison list, which is one of the
cornerstones of the class. It creates a list of all
combinations of the parameters.
:return:
"""
# print self
# start a list of comparisons that will
# be carried out with the given dataset
self.comparisonList = []
# go region by region
for reg in self.rl.mainList.keys():
# fitted region
wmin = self.rl.mainList[reg]['wmin']
wmax = self.rl.mainList[reg]['wmax']
# generate a dictionary of unique groups for each parameter
unique_groups = {}
# phys_pars = [par for par in self.sl.get_physical_parameters() if par not in ['lr']]
phys_pars = self.sl.get_physical_parameters()
for par in phys_pars:
groups = self.sl.get_defined_groups(parameter=par)
temp = []
for c in groups.keys():
# print groups[c][par]
temp.extend(groups[c][par])
unique_groups[par] = np.unique(temp).tolist()
# print unique_groups
# position in the row of each parameter
position = {key: 0 for key in unique_groups.keys()}
keys = unique_groups.keys()
# print position
# print unique_groups
# THIS IS PROBABLY THE MOST IDIOTIC WAY HOW TO GET
# ALL COMBINATIONS BETWEEN RECORDS IN N DIFFERENT LISTS
# SURPRISINGLY IT DOES NOT GENERATE REDUNDANT COMPARISONS
# It iterates over the positions list until for each
# record in the list position[i] == len(unique_groups[i])
# both are dictionaries of course
i = 0
all_groups_list = []
# while position[keys[-1]] >= len(unique_groups[keys[-1]])-1:
while True:
# append the current groups
temp = {key: unique_groups[key][position[key]] for key in keys}
all_groups_list.append(temp)
# search until you find a list of length > 1 or till the end
while i < len(keys) and (position[keys[i]] == len(unique_groups[keys[i]])-1):
i += 1
# if end was reached - end
if not i < len(keys):
break
else:
# else increment the record and start over
position[keys[i]] += 1
for j in range(0, i):
position[keys[j]] = 0
i = 0
# for rec in all_groups_list:
# print rec
for rec in all_groups_list:
# get unique set of parameters for a given group
all_pars = self.sl.get_parameter(**rec)
if self.ol is not None:
# if rv_group not in self.rel_rvgroup_region[reg]:
# continue
# the wmin wmax is used to check again that
# we are in the correct region.
obs = self.ol.get_spectra(wmin=wmin, wmax=wmax, permissive=True, **rec)
if len(obs) == 0:
continue
else:
obs = [None]
# add the comparison for each observed spectrum
# because in an unlikely event, when we fit the
# same RVs for several spectra
for o in obs:
# What if we are only generating spectra???
# If there are spectra attached we are
# comparing and that's it!
if o is None:
c = 'all'
else:
c = o.component
if c != 'all':
temp_all_pars = {c: all_pars[c]}
else:
temp_all_pars = all_pars
self.add_comparison(region=reg,
parameters=temp_all_pars,
groups=rec,
observed=o,
)
def remove_parameter(self, component, parameter, group):
"""
:param component: component for which the parameter is deleted
:param parameter:deleted paramer
:param group
:return:
"""
self.sl.remove_parameter(component, parameter, group)
def run_fit(self, l=None, verbose=False):
"""
Starts the fitting
:param l:
:param verbose:
:return:
"""
# update fitted parameters
self.update_fitter()
# set the identification of fitted parameters
self.fitter.set_fit_properties(self.sl.get_fitted_parameters(True)[1])
# this starts recording of each iteration chi2
self.fit_is_running = True
# runs the fitting
self.fitter(self.compute_chi2, l, verbose)
# copy the fit into the whole structure
self.accept_fit()
# writes the remaining iterations within the file
self.fitter.flush_iters()
# turn off the fitting
self.fit_is_running = False
def run_bootstrap(self, limits, outputname=None, decouple_rv=True, niter=100, sub_niter=3):
"""
Runs bootstrap simulation to estimate the errors. The initial parameter set is chosen
randomly in the vicinity of the solution that is stored within the Interface type.
:param limits: format dict(component1=dict(rv=[low, high], teff=[low, high],..),
component2=dict(..), ..), where the range in which the random number is
(stored_value - low, stored_value + high).
:param outputname: Prefix name for result of each bootstrap iteration.
:param decouple_rv: Should the rvs be fitted separately from the remaining parameters?
:param niter: Number of bootstrap iteration.
:param sub_niter: Number of sub-iterations, where rv is fitted first and then the
remaining parameters. This parameter is irrelevant for decouple_rv = False.
:return:
"""
# set outputname of each iteration
if outputname is None:
outputname = 'bootstrap'
# niter samples are computed
for i in range(niter):
# create an interface with a random data sample
itf = self.draw_random_sample()
# set a random starting point within limits
for c in limits.keys():
for p in limits[c].keys():
# user supplied limits
bs_vmin = limits[c][p][0]
bs_vmax = limits[c][p][1]
# get all defined groups
groups = itf.get_defined_groups(component=c, parameter=p)[c][p]
# for each group set random starting point
for g in groups:
# for each group, parameter and component
# get value, minimal and maximal
par = itf.sl.get_parameter(**{p : g})[c][0]
value = par.value
vmin = par.vmin
vmax = par.vmax
# set boundaries where random number is drawn
llim = max([value - bs_vmin, vmin])
ulim = min([value + bs_vmax, vmax])
# draw the random number
rn = llim + (ulim - llim) * np.random.random()
# set it to parameter
par.value = rn
par.vmin = max([vmin, value - 2 * bs_vmin])
par.vmax = min([vmax, value + 2 * bs_vmax])
# set outputname for one fit
outputname_one_iter = '.'.join([outputname, str(i).zfill(3), 'sav'])
# get list of fitted parameters
fitpars = {}
for c in itf.sl.componentList.keys():
fitpars[c] = []
for p in itf.sl.componentList[c].keys():
for k in range(0, len(itf.sl.componentList[c][p])):
if itf.sl.componentList[c][p][k].fitted:
fitpars[c].append(p)
break
#sys.exit(0)
# now proceed with the fitting
itf.save('.'.join([outputname, 'initial', str(i).zfill(3), 'sav']))
if decouple_rv:
# do several iterations, fitting rv and remaining parameters
for j in range(sub_niter):
# turn off fitting of radial velocity
itf.set_parameter(parname='rv', fitted=False)
# turn on remaining parameters
for c in fitpars.keys():
for p in fitpars[c]:
itf.set_parameter(parname=p, component=c, fitted=True)
# run the fit - not radial velocities
itf.run_fit()
#print itf
#print itf.list_comparisons()
# itf.save('.'.join(['before_rv', str(i).zfill(3), str(j).zfill(2), 'sav']))
# run the fit - radial velocities
itf.optimize_rv()
#print itf
#print itf.list_comparisons()
# itf.save('.'.join(['after_rv', str(i).zfill(3), str(j).zfill(2), 'sav']))
else:
itf.run_fit()
# save the result
itf.save(outputname_one_iter)
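# Usage sketch (hypothetical component and parameter names, illustrative limits):
#     limits = dict(primary=dict(rv=[10., 10.], teff=[500., 500.]),
#                   secondary=dict(rv=[10., 10.]))
#     itf.run_bootstrap(limits, outputname='bootstrap', niter=100, sub_niter=3)
# Each iteration resamples the observations, randomizes the starting point
# within `limits` and saves the fit to 'bootstrap.<iteration>.sav'.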
def run_mcmc(self, chain_file='chain.dat', nwalkers=None, niter=500, l=None, verbose=False):
"""
Runs the mcmc error estimation.
:return:
"""
# pass on the fit properties
self.fitter.set_fit_properties(self.sl.get_fitted_parameters(True)[1])
# update the boundaries
vmins = self.get_fitted_parameters(attribute='vmin')
vmaxs = self.get_fitted_parameters(attribute='vmax')
self.fitter.set_lower_boundary(vmins)
self.fitter.set_upper_boundary(vmaxs)
# get the values
vals = self.get_fitted_parameters(attribute='value')
# set up number of walkers
if nwalkers is None:
nwalkers = 4*len(vals)
# run the mcmc sampling
self.fitter.run_mcmc(self.compute_chi2, chain_file, vals, nwalkers, niter, l, verbose)
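# Usage sketch:
#     itf.run_mcmc(chain_file='chain.dat', niter=500)
# The number of walkers defaults to 4x the number of fitted parameters; the
# resulting chain can then be inspected with plot_convergence_mcmc,
# plot_covariances_mcmc or evaluate_mcmc.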
def save(self, ofile):
"""
Saves the interface as a text file.
:param ofile: file or filehandler
:return:
"""
# open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w')
# Setup the interface variables first.
string = ' INTERFACE '.rjust(105, '#').ljust(200, '#') + '\n'
# set the grid properities
string += 'grid_parameters: '
for key in self._grid_kwargs.keys():
if key not in ['debug']:
string += '%s: %s ' % (key, str(self._grid_kwargs[key]))
string += '\n'
# set the synthetic spectra parameters
string += 'synthetic_spectra_parameters: '
for key in self._synthetic_spectrum_kwargs.keys():
string += '%s: %s ' % (key, str(self._synthetic_spectrum_kwargs[key]))
string += '\n'
# Set the environmental keys
enviromental_keys = ['adaptive_resolution', 'debug']
string += 'env_keys: '
for ekey in enviromental_keys:
string += "%s: %s " % (ekey, str(getattr(self, ekey)))
string += '\n'
# finalize the string
string += ' INTERFACE '.rjust(105, '#').ljust(200, '#') + '\n'
ofile.writelines(string)
# save the starlist
self.sl.save(ofile)
# save the fitter
self.fitter.save(ofile)
# save the regions
self.rl.save(ofile)
# save the observed list - if any was given
# and compute the chi-square
if self.ol is not None:
# saves the observed list
self.ol.save(ofile)
# saves the chi-square and degrees of freedom
string = ' CHI-SQUARE '.rjust(105, '#').ljust(200, '#') + '\n'
# compute chi2 and ddof
chi2 = self.compute_chi2()
ddof = self.get_degrees_of_freedom()
# save it within the asc file
string += 'Chi^2: %s Degrees_Of_Freedom: %s Reduced Chi^2: %s\n' % \
(str(chi2), str(ddof), str(chi2 / ddof))
string += ' CHI-SQUARE '.rjust(105, '#').ljust(200, '#') + '\n'
ofile.writelines(string)
def setup(self):
"""
This function probes the observed and
region list and propagates group definitions
from them to the starlist.
:return:
"""
# first setup region groups
if self.rl is not None:
region_groups = self.rl.get_region_groups()
self.sl.set_groups(region_groups)
else:
self.rl = RegionList(debug=self.debug)
self.rl.get_regions_from_obs(copy.deepcopy(self.ol.observedSpectraList['spectrum']))
# TODO setting up the region <-> rv relation better - this is a quick fix
# TODO and unlikely a robust one
self.rel_rvgroup_region = {reg: [0] for reg in self.rl.get_registered_regions()}
region_groups = self.rl.get_region_groups()
self.sl.set_groups(region_groups)
# print self
# setup radial velocity groups
if self.ol is not None:
# we will fit some parameters separately for some spectra,
# therefore all groups are assigned from the data, not only
# the radial velocities
# check that all fitted spectra fit within at least one
# spectral region
self.verify_spectra_and_regions()
if self.spectrum_by_spectrum is not None:
# setup groups for each spectrum
# relative luminosity is given by spectra region, not the spectrum itself
phys_pars = [par for par in self.sl.get_physical_parameters() if par not in 'lr']
# parameters that will be owned by each spectrum
varparams = self.spectrum_by_spectrum
# common parameters
fixparams = [par for par in phys_pars if par not in self.spectrum_by_spectrum]
self._set_groups_to_observed(varparams, fixparams)
self._setup_all_groups()
else:
# print self
self._setup_rv_groups()
# print self
# setup the wavelength step of synthetic spectra
# from the observed spectra
if self.adaptive_resolution:
step = self.ol.get_resolution()
if self.debug:
print "The step size of the grid is: %s Angstrom." % str(step/2.)
self.set_grid_properties(step=step/2.)
else:
warnings.warn('There are no data attached, so all regions are set to '
'have the same radial velocity. Each component can have '
'a different velocity, of course.')
# attach grids to the interface
self._setup_grids()
# create the basic interpolated spectra
self.ready_synthetic_spectra()
# setup all comparisons
if self.spectrum_by_spectrum is not None:
self.ready_comparisons_spectrum_by_spectrum()
else:
self.ready_comparisons()
# setup fitter
if self.fitter is None:
self.fitter = Fitter(debug=self.debug)
# at the end the comparisons synthetic spectra are populated
self.populate_comparisons()
def set_grid_properties(self, **kwargs):
"""
:param kwargs: padding - number of spectra to use for
padding of synthetic spectra
:param kwargs: order - maximal number of spectra
for interpolation
:return:
"""
# if we pass step, we turn off
# adaptive resolution
if 'step' in kwargs.keys():
self.adaptive_resolution = False
for k in kwargs.keys():
# setup grid parameters
if k in self._grid_kwargs.keys():
self._grid_kwargs[k] = kwargs[k]
# setup synthetic spectra parameters
elif k in self._synthetic_spectrum_kwargs.keys():
self._synthetic_spectrum_kwargs[k] = kwargs[k]
else:
raise KeyError('Key: %s is not a property of either the grid or synthetic spectra. '
               'The only parameters adjustable with this function are: '
               '%s for the grid and %s for synthetic spectra.'
               % (k,
                  str(self._grid_kwargs.keys()),
                  str(self._synthetic_spectrum_kwargs.keys())))
def _set_groups_to_observed(self, varparams, fixparams):
"""
:param varparams parameters whose group number should vary from spectrum to spectrum
:param fixparams parameters whose group should be the same for all spectra
:return:
"""
if self.ol is None:
raise AttributeError('No data are attached.')
else:
for i in range(0, len(self.ol)):
# setup varying parameters
for vpar in varparams:
if vpar not in self.ol.observedSpectraList['group'].keys():
self.ol.observedSpectraList['group'][vpar] = np.zeros(len(self.ol))
self.ol.observedSpectraList['group'][vpar][i] = i+1
# setup fixed parameters
for fpar in fixparams:
if fpar not in self.ol.observedSpectraList['group'].keys():
self.ol.observedSpectraList['group'][fpar] = np.zeros(len(self.ol))
self.ol.observedSpectraList['group'][fpar][i] = 0
# set the groups from table to spectra
self.ol._set_groups_to_spectra()
def set_one_for_all(self, switch):
"""
Sets usage of one grid for all regions. This is faster
when there are not many empty gaps between the fitted
regions. It reduces the number of spectra that have to be
loaded, but increases the duration of the interpolation.
:param switch turn the one4all regime on/off
:return:
"""
if not isinstance(switch, (bool, int)):
raise TypeError('Switch of the one4all regime must have type bool.')
self.one4all = switch
self._setup_grids()
def set_parameter(self, component='all', parname=None, group='all', **kwargs):
"""
:param component:
:param parname
:param group:
:param kwargs: keywords to be set up for each parameter
:return:
"""
# check the results
if parname is None:
print "I cannot adjust parameter: %s." % str(parname)
if len(kwargs.keys()) == 0:
return
# setup the components
if component == 'all':
component = self.sl.get_components()
else:
component = [component]
# create a list of unique groups if all are needed
if group == 'all':
groups = []
dict_groups = self.sl.get_defined_groups(parameter=parname)
for c in dict_groups.keys():
groups.extend(dict_groups[c][parname])
groups = np.unique(groups)
else:
groups = [group]
# propagate to the star
for c in component:
for g in groups:
# print c, g, kwargs
self.sl.set_parameter(parname, c, g, **kwargs)
# print self
# recompute synthetic spectra
if (parname not in self._not_given_by_grid) & ('value' in kwargs.keys()):
self.ready_synthetic_spectra()
# update the fitter if number of fitted
# parameters changes
if 'fitted' in kwargs.keys() and self.fitter.fittername is not None:
fitparams = self.get_fitted_parameters()
self.choose_fitter(name=self.fitter.fittername, fitparams=fitparams, **self.fitter.fit_kwargs)
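# Usage sketch (hypothetical component name and values):
#     itf.set_parameter(component='primary', parname='teff', value=12000.,
#                       fitted=True, vmin=10000., vmax=14000.)
# Changing the value of a grid parameter triggers recomputation of the
# synthetic spectra; toggling 'fitted' refreshes the fitter's parameter list
# (if a fitter has already been chosen).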
def set_error(self, parname='rv', component=None, error=1.0):
"""
Sets error by adjusting vmin, vmax,
:param parname: name of the parameter
:param component:
:param error: the error, which will be used to set boundaries
:return:
"""
if component is not None:
components = [component]
else:
components = self.sl._registered_components
# get all fitted parameters
parname = parname.lower()
for c in components:
if parname in self.sl.componentList[c].keys():
for p in self.sl.componentList[c][parname]:
v = p['value']
# relative luminosity needs special treatment
if p['name'] == 'lr':
p['vmin'] = max([0.0, v - error])
p['vmax'] = min([1.0, v + error])
# and so does also the rotational velocity
elif p['name'] == 'vrot':
p['vmin'] = max([0.0, v - error])
p['vmax'] = v + error
# and the rest is simple
else:
p['vmin'] = v - error
p['vmax'] = v + error
def _setup_grids(self):
"""
Initializes grid of synthetic spectra for each region -
i.e. there is no point in calling the function without
having the regions set up.
:param kwargs: see pyterpol.
:return:
"""
if not self.one4all:
for reg in self.rl.mainList.keys():
self.grids[reg] = SyntheticGrid(**self._grid_kwargs)
else:
# assume that there is only one grid for all
self.grids['all'] = SyntheticGrid(**self._grid_kwargs)
def _setup_rv_groups(self):
"""
Setting up the rv_groups is a pain..
:return:
"""
# TODO Can this be done better?????
# empty array for components where cloning
# was performed - to get rid of the first
# group
cloned_comps = []
registered_groups = []
# dictionary for newly registered groups
# this is necessary because the newly
# registered groups have to be assigned
# back to the spectra - otherwise we
# would not know which rv belongs to
# which spectrum
new_groups = dict()
# get wavelength boundaries of defined regions
wmins, wmaxs, regs = self.rl.get_wavelengths(verbose=True)
# this dictionary is needed to have
# unambiguous relationship between
# rv_group, spectrum and region
reg2rv = {x: [] for x in regs}
# for every region we have a look whether we have some data
for wmin, wmax, reg in zip(wmins, wmaxs, regs):
# query spectra for each region
observed_spectra = self.ol.get_spectra(wmin=wmin, wmax=wmax)
for i, spectrum in enumerate(observed_spectra):
# read out properties of spectra
component = spectrum.component
# there can be more spectral groups
rv_groups = spectrum.group['rv']
if not isinstance(rv_groups, (list, tuple)):
rv_groups = [rv_groups]
for rv_group in rv_groups:
# readout groups that were already defined for all components
def_groups = self.sl.get_defined_groups(component='all', parameter='rv')['all']['rv']
# We define group for our observation
if rv_group is None:
gn = generate_least_number(def_groups)
reg2rv[reg].append(gn)
# save the newly registered group
if spectrum.filename not in new_groups.keys():
new_groups[spectrum.filename] = []
new_groups[spectrum.filename].append(gn)
elif rv_group not in def_groups:
gn = rv_group
reg2rv[reg].append(rv_group)
# if the group is defined we only need to
# add it among the user-defined ones, so
# that it is not deleted later
elif rv_group in def_groups:
registered_groups.append(rv_group)
reg2rv[reg].append(rv_group)
continue
# attach a new parameter to the StarList
# print component, gn
self.sl.clone_parameter(component, 'rv', group=gn)
if component not in cloned_comps:
if component == 'all':
cloned_comps.extend(self.sl.get_components())
else:
cloned_comps.append(component)
registered_groups.append(gn)
# print registered_groups, cloned_comps
# remove the default groups
for c in cloned_comps:
gref = self.sl.componentList[c]['rv'][0]['group']
if gref not in registered_groups:
self.remove_parameter(c, 'rv', gref)
# back register the group numbers to the observed spectra
for filename in new_groups.keys():
# print new_groups[filename]
self.ol.set_spectrum(filename=filename, group={'rv': new_groups[filename]})
# print self
# finalize the list of rv_groups for each region
self.rel_rvgroup_region = {x: np.unique(reg2rv[x]).tolist() for x in reg2rv.keys()}
def _setup_all_groups(self):
"""
Setting up all groups from observations is an even bigger pain.
:return:
"""
# get wavelength boundaries of defined regions
wmins, wmaxs, regs = self.rl.get_wavelengths(verbose=True)
# this dictionary is needed to have
# unambiguous relationship between
# rv_group, spectrum and region
reg2rv = {x: [] for x in regs}
# physical parameters
phys_pars = self.sl.get_physical_parameters()
phys_pars = [par for par in phys_pars if par not in ['lr']]
# for every region we have a look whether we have some data
for p_par in phys_pars:
new_groups = dict()
cloned_comps = []
registered_groups = []
for wmin, wmax, reg in zip(wmins, wmaxs, regs):
# query spectra for each region
observed_spectra = self.ol.get_spectra(wmin=wmin, wmax=wmax)
# go over each observed spectrum
for i, spectrum in enumerate(observed_spectra):
# read out properties of spectra
component = spectrum.component
# if the group is not defined for the spectrum
if p_par in spectrum.group.keys():
p_group = copy.deepcopy(spectrum.group[p_par])
else:
# self.ol.set_spectrum(spectrum.filename, group={p_par:0})
p_group = None
# readout groups that were already defined for all components
def_groups = self.sl.get_defined_groups(component='all', parameter=p_par)['all'][p_par]
# print p_par, def_groups
# We define group for our observation
if p_group is None:
if p_par == 'rv':
gn = generate_least_number(def_groups)
reg2rv[reg].append(gn)
# for other than rvs, the default group is 0
else:
# self.ol.set_spectrum(filename=spectrum.filename, group={p_par: 0})
# spectrum.group[p_par]=0
continue
# save the newly registered group
if spectrum.filename not in new_groups.keys():
new_groups[spectrum.filename] = []
new_groups[spectrum.filename].append(gn)
elif p_group not in def_groups:
gn = p_group
reg2rv[reg].append(p_group)
# if the group is defined we only need to
# add it among the user-defined ones, so
# that it is not deleted later
elif p_group in def_groups:
registered_groups.append(p_group)
reg2rv[reg].append(p_group)
continue
# attach a new parameter to the StarList
# print component, gn
self.sl.clone_parameter(component, p_par, group=gn)
if component not in cloned_comps:
if component == 'all':
cloned_comps.extend(self.sl.get_components())
else:
cloned_comps.append(component)
registered_groups.append(gn)
# print registered_groups, cloned_comps
# remove the default groups
for c in cloned_comps:
gref = self.sl.componentList[c][p_par][0]['group']
if gref not in registered_groups:
self.remove_parameter(c, p_par, gref)
# print new_groups
# back register the group numbers to the observed spectra
for filename in new_groups.keys():
# print p_par, new_groups
self.ol.set_spectrum(filename=filename, group={'rv': new_groups[filename]})
# finalize the list of rv_groups for each region
self.rel_rvgroup_region = {x: np.unique(reg2rv[x]).tolist() for x in reg2rv.keys()}
def update_fitter(self):
"""
Pass the fitted parameters to the fitter.
:return:
"""
# get the fitted parameters
fitpars = self.get_fitted_parameters()
name = self.fitter.fittername
kwargs = self.fitter.fit_kwargs
# update the fitted parameters
self.choose_fitter(name, fitparams=fitpars, **kwargs)
def verify_spectra_and_regions(self):
"""
Checks that all fitted spectra fit into at least one region.
If not, a warning is issued.
:return:
"""
# get all defined regions
wmins, wmaxs = self.rl.get_wavelengths()
# go over each spectrum
for spectrum in self.ol.observedSpectraList['spectrum']:
wave = spectrum.get_wavelength()
owmin = wave.min()
owmax = wave.max()
# check whether the spectrum fits into at least one
# region
is_within = False
for wmin, wmax in zip(wmins, wmaxs):
if (wmin > owmin) & (wmax < owmax):
is_within = True
break
if not is_within:
warnings.warn('The spectrum:\n%s does not fit into any defined spectral region. These '
'spectra will be excluded from fitting.' % str(spectrum))
@staticmethod
def write_mc_result(f, treshold=100, outputname='fit.res'):
"""
Writes the result of fitting
:param f a fitting log
:param outputname
:param treshold
:return:
"""
# returns a dictionary of fitted parameters and their uncertainties
pars = Interface.evaluate_mcmc(f, treshold=treshold)
# creates the output string
string = ''
for c in pars.keys():
for p in pars[c].keys():
for row in pars[c][p]:
string += 'c:%15s p:%6s ' % (c, p)
string += 'g:%3i ' % (row['group'])
for key in ['best', 'gauss_mean', 'gauss_sigma', 'lower', 'upper']:
string += "%6s: %10.4f " % (key, row[key])
string += '\n'
# writes it to a file
ofile = open(outputname, 'w')
ofile.writelines([string])
ofile.close()
def write_rvs(self, outputname=None):
"""
Writes RVs defined to all groups --- usually there
is only one spectrum per group.
:param outputname: file where the output is written
:return: rvs -- radial velocities per component and group,
allgroup -- a list of all defined rv groups
names --list of spectra names
"""
# get defined groups
groups = self.get_defined_groups(component='all', parameter='rv')
# get a parameter
components = self.sl._registered_components
# get a list of unique groups
allgroups = []
for c in components:
allgroups.extend(groups[c]['rv'])
allgroups = np.unique(allgroups)
# get all components for a given group
rvs = {c: [] for c in components}
names = []
hjds = []
groups = []
for i, g in enumerate(allgroups):
# get all observed spectra corresponding to the group
obspecs = self.ol.get_spectra(rv=g)
# get the radial velocities
pars = self.sl.get_parameter(rv=g)
for obspec in obspecs:
for j, c in enumerate(components):
# append radial velocity
if c in pars.keys():
rvs[c].append(pars[c][0]['value'])
# if a component is missing, -9999.9999 is assigned instead
else:
rvs[c].append(-9999.9999)
# append name and hjd and group
names.append(obspec.filename)
hjds.append(obspec.hjd)
groups.append(g)
if outputname is not None:
# open the file
ofile = open(outputname, 'w')
# switch for writing hjds
has_hjd = any([x is not None for x in hjds])
# write the header
if has_hjd:
ofile.write("#%5s%20s%20s" % ('GROUP', 'FILENAME', 'HJD'))
else:
ofile.write("#%5s%20s" % ('GROUP', 'FILENAME'))
for j in range(0, len(components)):
ofile.write("%15s" % components[j].upper())
ofile.write('\n')
# write the rvs
for i in range(0, len(names)):
# what if HJD is not assigned
if has_hjd:
ofile.write("%6s%20s%20s" % (str(groups[i]).zfill(3), names[i], str(hjds[i])))
else:
ofile.write("%6s%20s" % (str(groups[i]).zfill(3), names[i]))
for c in components:
ofile.write("%15.6f" % rvs[c][i])
ofile.write('\n')
return rvs, allgroups, names
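# Usage sketch (hypothetical output file):
#     rvs, groups, names = itf.write_rvs(outputname='radial_velocities.dat')
# `rvs` maps each component to a list of radial velocities (one entry per
# observed spectrum / rv group); missing components are filled with -9999.9999.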
def write_shifted_spectra(self, outputfile=None, residuals=False):
"""
:return:
"""
# setup name prefix
if outputfile is None:
outputfile = ''
# go over each record within comparisonList
for cp in self.comparisonList:
if residuals:
outputfile = cp['observed'].filename
# extract description of the comparison
wave = cp['wave']
intens = sum_dict_keys(cp['synthetic'])
wmin = cp['wmin']
wmax = cp['wmax']
component = cp['observed'].component
korel = cp['observed'].korel
rvgroup = cp['groups']['rv']
# set name
name = '_'.join([outputfile, 'c', component, 'wmin', str(wmin), 'wmax', str(wmax), 'g', str(rvgroup)]) \
+ '.dat'
# construct header of the file
header = ''
header += '# Component: %s\n' % str(component)
header += '# Region: (%s,%s)\n' % (str(wmin), str(wmax))
header += '# KOREL: %s\n' % str(korel)
header += '# Residual: %s\n' % str(residuals)
# write the synthetic spectrum
ofile = open(name, 'w')
ofile.writelines(header)
if residuals:
oi = cp['observed'].get_spectrum(wmin, wmax)[1]
np.savetxt(ofile, np.column_stack([wave, oi - intens]), fmt='%15.8e')
else:
np.savetxt(ofile, np.column_stack([wave, intens]), fmt='%15.8e')
ofile.close()
def write_synthetic_spectra(self, component=None, region=None, rvgroups=None, outputname=None, korel=False):
"""
Writes the synthetic spectra obtained through the fitting.
:param component
:param region
:param outputname
:param korel
:return:
"""
# set defaults for component
if component is None:
components = self.sl.get_components()
if isinstance(component, str):
components = [component]
# set defaults for region
if region is None:
regions = self.rl.get_registered_regions()
if isinstance(region, str):
regions = [region]
# go over each region
for r in regions:
# get the wavelengths
wmin = self.rl.mainList[r]['wmin']
wmax = self.rl.mainList[r]['wmax']
# get defined groups for the region
reg_groups = copy.deepcopy(self.rl.mainList[r]['groups'][0])
phys_pars = [x for x in self.sl.get_physical_parameters() if x not in ['rv']]
for par in phys_pars:
if par not in reg_groups.keys():
reg_groups[par] = 0
# get regional parameters
reg_pars = self.sl.get_parameter(**reg_groups)
for c in components:
# get defined rv groups
if rvgroups is None:
rv_groups = self.sl.get_defined_groups(component=c, parameter='rv')[c]['rv']
else:
if not isinstance(rvgroups, (list, tuple)):
    rv_groups = [rvgroups]
else:
    rv_groups = rvgroups
for rvg in rv_groups:
# the outputname
if outputname is not None:
oname = '_'.join([outputname, 'c', c, 'r', str(wmin),
str(wmax), 'g', str(rvg)]) + '.dat'
else:
oname = '_'.join(['c', c, 'r', str(wmin),
str(wmax), 'g', str(rvg)]) + '.dat'
if self.debug:
print "Writing spectrum: %s." % oname
# get the parameters
# the radial velocity
rvpar = self.sl.get_parameter(rv=rvg)[c]
# remaining parameters
cpars = reg_pars[c]
# append the radial velocity
cpars.extend(rvpar)
# print cpars
# separate those that need to be computed,
# i.e. those not defined by the grid
computepars = [par for par in cpars if par['name'] in self._not_given_by_grid]
computepars = self.extract_parameters(computepars)
# print computepars
# compute the synthetic spectra
w, i = self.synthetics[r][c].get_spectrum(wmin=wmin, wmax=wmax, korel=korel, **computepars)
# construct header of the file
header = ''
header += '# Component: %s\n' % str(c)
header += '# Region: (%s,%s)\n' % (str(wmin), str(wmax))
header += '# KOREL: %s\n' % str(korel)
header += '# Parameters: %s\n' % str(self.extract_parameters(cpars))
# write the file
ofile = open(oname, 'w')
ofile.writelines(header)
np.savetxt(ofile, np.column_stack([w, i]), fmt='%15.10e')
ofile.close()
# reset the rv groups before moving on to the next component
if rvgroups is None:
rv_groups = None
class List(object):
"""
Future parent class for all the lists, which are dictionaries... :-)
"""
def __init__(self, l=None, debug=False):
"""
:param l: the list stored within the class
:param debug: debugmode on/off
:return:
"""
# list
if l is not None:
self.mainList = l
else:
self.mainList = {}
# setup debug mode
self.debug = debug
def clear_all(self):
"""
Clears the list
:return: None
"""
self.mainList = {}
class ObservedList(object):
"""
A helper class which groups all observed spectra and
prepares necessary parameters for fitting.
"""
def __init__(self, observedSpectraList=None, debug=False):
"""
:param observedSpectraList: this should not be used in general; it creates the class
assuming that we are passing self.observedSpectraList directly,
so it should probably not be used
:param debug: debug mode
:return:
"""
# dictionary containing all observed spectra; apart from that
# it also carries information on groups. A group for radial velocities
# always has to be set, because we intend to fit spectra acquired
# at different times.
self.observedSpectraList = dict(spectrum=[], group=dict(), properties=dict())
self.groupValues = dict()
# list of properties
self._property_list = ['component', 'filename', 'hasErrors', 'korel', 'loaded', 'wmin', 'wmax']
# self._queriables = copy.deepcopy(self._property_list).extend(['group'])
# although wmin, wmax can be queried, it is treated separately from the remaining
# parameters, because it cannot be tested on equality
self._queriables = [x for x in self._property_list if x not in ['wmin', 'wmax']]
self._queriable_floats = ['wmin', 'wmax']
# initialize with empty lists
self.observedSpectraList['properties'] = {key: [] for key in self._property_list}
# debug
self.debug = debug
if observedSpectraList is not None:
self.observedSpectraList = observedSpectraList
self.read_groups()
self.read_properties()
self.groupValues = self.get_defined_groups()
def __len__(self):
"""
Returns number of attached observed spectra.
"""
return len(self.observedSpectraList['spectrum'])
def __str__(self):
"""
String method for the class
:return:
string.. string representation of the class
"""
string = 'List of all attached spectra:\n'
for i, spectrum in enumerate(self.observedSpectraList['spectrum']):
string += str(spectrum)
return string
def add_one_observation(self, obs=None, update=True, **kwargs):
"""
Adds observation to the list.
:param obs observed spectrum wrapped in ObservedSpectrum class
:param update - update the observed spectra list
:param kwargs
see class ObservedSpectrum (observations module) for details.
"""
# adds the spectrum and loads it
if self.debug:
kwargs['debug'] = True
if obs is None:
obs = ObservedSpectrum(**kwargs)
self.observedSpectraList['spectrum'].append(obs)
if self.debug:
print "Adding spectrum: %s" % (str(obs))
# builds the observedSpectraList dictionary
if update:
self.read_groups()
self.read_properties()
self.groupValues = self.get_defined_groups()
def add_observations(self, spec_list, update=True):
"""
:param spec_list: list of dictionaries - key words are
the same as for ObservedSpectrum class constructor
:param update: whether to update the dictionary
with the properties of the observed spectra
"""
# attach the spectra
for rec in spec_list:
if isinstance(rec, dict):
self.add_one_observation(update=False, **rec)
else:
self.add_one_observation(update=False, obs=rec)
# builds the observedSpectraList dictionary
if update:
self.read_groups()
self.read_properties()
self.groupValues = self.get_defined_groups()
def clear_all(self):
"""
Clears all spectra.
"""
self.__init__()
def get_data_groups(self, components):
"""
Returns a dictionary, containing a record
on defined group for each component.
:param components: a list of queried components
:return:
"""
groups = dict()
for component in components:
osl = self.get_spectra(verbose=True, component=component)
if self.debug:
print 'Queried observed spectra: %s for component: %s.' % (str(osl), component)
if len(osl) > 0:
groups[component] = ObservedList(observedSpectraList=osl).get_defined_groups()
return groups
def get_defined_groups(self, component=None):
"""
Reads all groups and values that are set
for the spectra in the list.
:param component
:return dictionary of defined group for all/given component
"""
if component == 'all':
component = None
# empty dictionary for the values
groups = dict()
# go through each spectrum and store defined values
for spectrum in self.observedSpectraList['spectrum']:
# select component
if component is not None and spectrum.component != component:
continue
for key in spectrum.group.keys():
if key not in groups.keys():
groups[key] = []
if isinstance(spectrum.group[key], (list, tuple)):
groups[key].extend(spectrum.group[key])
else:
groups[key].append(spectrum.group[key])
# only unique values are needed
for key in groups.keys():
groups[key] = np.unique(groups[key]).tolist()
return groups
def get_resolution(self, verbose=False):
"""
Reads the resolution (wavelength step) of each spectrum
:param verbose
:return:
"""
# create a list of resolutions
resolutions = np.zeros(len(self))
for i in range(0, len(self)):
resolutions[i] = self.observedSpectraList['spectrum'][i].step
# if verbose is set returns resolution for each spectrum
if verbose:
return resolutions
# or just the maximum value
else:
return np.max(resolutions)
def get_spectra(self, verbose=False, permissive=False, **kwargs):
"""
:param kwargs.. properties of ObservedSpectrum,
that we want to return. This function does not
search the individual spectra, but the dictionary
observedSpectraList.
:param verbose: return the whole reduced observedSpectraList
record instead of just the spectra
:param permissive: if True, unknown keywords are ignored
instead of raising a KeyError
In general the queried properties can be - wmin, wmax, group,
component etc..
:return:
speclist = all spectra that have the queried properties
"""
# First of all check that all passed arguments are
# either defined among queriables or is in groups
to_pass = []
for key in kwargs.keys():
# print key, self._queriables
if (key not in self._queriables) & (key not in self._queriable_floats):
if key not in self.groupValues.keys():
if permissive:
to_pass.append(key)
continue
raise KeyError('Keyword %s is not defined. This either means, that it was not set up for '
'the observed spectra, or is an attribute of Observed spectrum, but is not '
'defined among queriables, or is wrong.' % key)
# create a copy of the spectralist
osl = copy.deepcopy(self.observedSpectraList)
# debug string
dbg_string = 'Queried: '
# reduce the list
for key in kwargs.keys():
#
if key in to_pass:
continue
# find all matching for a given key-word
keytest = key.lower()
# these can be tested on equality as strings
if keytest in self._queriables:
    vind = np.where(np.array(osl['properties'][keytest], dtype=str) == str(kwargs[key]))[0]
elif keytest == 'component':
    vind = np.where((np.array(osl['properties'][keytest], dtype=str) == str(kwargs[key])) |
                    (np.array(osl['properties'][keytest], dtype=str) == 'all'))[0]
# that cannot be tested on equality
elif keytest == 'wmin':
vind = np.where(np.array(osl['properties'][keytest]) <= kwargs[key])[0]
elif keytest == 'wmax':
vind = np.where(np.array(osl['properties'][keytest]) >= kwargs[key])[0]
# those that are defined in groups
elif keytest in osl['group'].keys():
vind = []
for i in range(0, len(osl['spectrum'])):
if isinstance(osl['group'][keytest][i], (tuple, list)):
if kwargs[key] in osl['group'][keytest][i]:
vind.append(i)
else:
if kwargs[key] == osl['group'][keytest][i]:
vind.append(i)
vind = np.array(vind)
if len(vind) == 0:
warnings.warn('No spectrum matching %s: %s was found in the '
'list of observed spectra:\n%sDo not panic, it can '
'still be listed among \'all\'.' % (key, str(kwargs[key]), str(self)))
return []
if self.debug:
dbg_string += '%s: %s ' % (key, str(kwargs[key]))
print("%s.. %s spectra remain." % (dbg_string, str(len(vind))))
# extract them from the list
for dic in osl.keys():
# if the key refers to a dictionary
if isinstance(osl[dic], dict):
for sub_key in osl[dic].keys():
osl[dic][sub_key] = (np.array(osl[dic][sub_key])[vind]).tolist()
# if it refers to a list or array
else:
osl[dic] = (np.array(osl[dic])[vind]).tolist()
# simple output, just spectra
if not verbose:
return osl['spectrum']
# otherwise the whole remnant of the
# observed spectra list is returned
else:
return osl
def load(self, f):
"""
Loads the text representation of the class from
a file f.
:param f
:return:
"""
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('OBSERVEDLIST') > -1:
data_start = i
break
# check that there are actually some data in the file
# the algorithm failed to load the class
if data_start >= len(lines):
return False
# create a regionlist
ol = ObservedList()
# from here the file is actually being read
for i, l in enumerate(lines[data_start+1:]):
# once we reach regionlist, we end
if l.find('OBSERVEDLIST') > -1:
break
# split the line
d = l.split()
# print d
if d[0].find('filename') > -1:
i = 0
cdict = {}
# print d
while i < len(d):
if d[i].find(':') > -1:
j = i + 1
while j < len(d) and d[j].find(':') == -1:
j += 1
stub = d[i:j]
if len(stub) < 3:
cdict[d[i].strip(':')] = stub[1].strip(':[]{}\'\"')
else:
cdict[d[i].strip(':')] = map(int, [stub[k].strip(':[]{}\'\"') for k in range(1, len(stub))])
i = j
# it is a mess with the global error :-(
cdict['error'] = cdict['global_error']
del cdict['global_error']
# cast the parameters to the correct types
parnames = ['filename', 'component', 'error', 'korel', 'hjd']
cast_types = [str, str, float, string2bool, float]
for k in cdict.keys():
if k in parnames:
i = parnames.index(k)
if cdict[k] != 'None':
cdict[k] = cast_types[i](cdict[k])
else:
cdict[k] = None
else:
# the remaining must be groups
cdict[k] = int(cdict[k])
# add the parameter if it does not exist
groups = {key: cdict[key] for key in cdict.keys() if key not in parnames}
kwargs = {key: cdict[key] for key in cdict.keys() if key in parnames}
ol.add_one_observation(group=groups, **kwargs)
# do the same for enviromental keys
if d[0].find('env_keys') > -1:
# the first string is just identification
d = d[1:]
# secure correct types
recs = ['debug']
cast_types = [string2bool]
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
for k in cdict.keys():
if k in recs:
i = recs.index(k)
ctype = cast_types[i]
cdict[k] = ctype(cdict[k])
# assign the values
setattr(ol, k, cdict[k])
# finally assign everything to self
attrs = ['debug', 'groupValues', 'observedSpectraList']
for attr in attrs:
setattr(self, attr, getattr(ol, attr))
# if we got here, we loaded the data
return True
def read_groups(self):
"""
Updates the dictionary observedSpectraList with group
records for every single observations and creates
the dictionary groupValues which contains lists of
all defined groups for every parameter.
For parameters != 'rv':
If at least one spectrum has a group assigned
it is automatically assumed, that it does not
belong among the remaining ones. This means
that all remaining spectra are assigned their
own group.
For parameters == 'rv':
Each spectrum is assigned unique RV group,
unless this is overriden by the user by setting
them up. This comes natural, since we are
likely to fit spectra from different times,
regions, where slight shifts in rv are
very likely.
"""
# First go through each spectrum to see, which
# groups were defined by user
groups = self.get_defined_groups()
# print groups
# check that rv has been setup - mandatory, because each observed spectrum
# is assigned its own rv_group
if 'rv' not in groups.keys():
groups['rv'] = []
# assign empty group arrays
for key in groups.keys():
self.observedSpectraList['group'][key] = np.zeros(len(self)).astype('int16').tolist()
# Assigning groups to every spectrum
for i, spectrum in enumerate(self.observedSpectraList['spectrum']):
for key in groups.keys():
# If the group was not user-defined, the smallest
# unused group number is assigned
if key != 'rv':
gn = spectrum.get_group(key)
def_groups = groups[key]
# print key, gn, def_groups
# if spectrum has no group, but some groups have been defined,
# the group is assigned to the least number not in defined groups
if gn is None and len(def_groups) > 0:
gn = 0
while gn in def_groups:
gn += 1
# if no group is defined for all spectra, start with zero
elif gn is None and len(def_groups) == 0:
gn = 0
# store the groupnumber
self.observedSpectraList['group'][key][i] = gn
else:
gn = spectrum.get_group(key)
if gn is None:
self.observedSpectraList['group'][key][i] = None
else:
self.observedSpectraList['group'][key][i] = gn
# propagate the groups back to spectra
self._set_groups_to_spectra()
def read_properties(self):
"""
Goes through the attached spectra, reads their
properties and stores them within the observedSpectraList
dictionary.
"""
# initialize with empty lists
for key in self._property_list:
self.observedSpectraList['properties'][key] = np.empty(len(self), dtype=object)
# fill the dictionary
for i, spectrum in enumerate(self.observedSpectraList['spectrum']):
for key in self._property_list:
self.observedSpectraList['properties'][key][i] = getattr(spectrum, key)
def save(self, ofile):
"""
Saves the class. It should be retrievable from the file.
:param ofile:
:return:
"""
# Open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w+')
# parameters listed for each record in the RegionList
enviromental_keys = ['debug']
string = ' OBSERVEDLIST '.rjust(105, '#').ljust(200, '#') + '\n'
for s in self.observedSpectraList['spectrum']:
keys = ['filename', 'component', 'korel', 'global_error', 'groups', 'hjd']
for k in keys:
if k not in ['groups']:
string += '%s: %s ' % (k, str(getattr(s, k)))
else:
for gk in s.group.keys():
if isinstance(s.group[gk], (list, tuple)):
string += '%s: ' % gk
for gn in s.group[gk]:
string += '%s ' % str(gn)
else:
string += '%s: %s ' % (gk, str(s.group[gk]))
string += '\n'
# attach enviromental keys
for ekey in enviromental_keys:
string += "%s: %s " % (ekey, str(getattr(self, ekey)))
string += '\n'
# finalize the string
string += ' OBSERVEDLIST '.rjust(105, '#').ljust(200, '#') + '\n'
# write the result
ofile.writelines(string)
def set_spectrum(self, filename=None, **kwargs):
"""
Sets spectrum to a given value.
:param filename
:param kwargs:
:return:
"""
# print kwargs
for i in range(0, len(self)):
if self.observedSpectraList['spectrum'][i].filename == filename:
for key in kwargs.keys():
setattr(self.observedSpectraList['spectrum'][i], key, kwargs[key])
if key == 'group':
self.observedSpectraList['spectrum'][i].set_group(kwargs[key])
# print self
self.read_groups()
self.groupValues = self.get_defined_groups()
def _set_groups_to_spectra(self):
"""
Propagates groups, which are set in observedSpectraList,
in individual spectra.
"""
for i in range(0, len(self.observedSpectraList['spectrum'])):
group = {key: self.observedSpectraList['group'][key][i] for key in self.observedSpectraList['group'].keys()}
self.observedSpectraList['spectrum'][i].set_group(group)
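# --------------------------------------------------------------------------- #
# Illustrative usage sketch (not part of the original API): how an ObservedList
# is typically built and queried. The file names, component name and wavelength
# limits below are hypothetical placeholders.
def _example_observed_list(filenames=('spec_a.dat', 'spec_b.dat')):
    """
    A minimal sketch, assuming the ascii files exist and contain
    wavelength/intensity columns readable by ObservedSpectrum.
    """
    ol = ObservedList()
    # attach two spectra of the same component; each one automatically
    # receives its own 'rv' group (see read_groups)
    ol.add_observations([dict(filename=f, component='primary', korel=False) for f in filenames])
    # query the spectra that cover a given wavelength interval
    selection = ol.get_spectra(component='primary', wmin=6500., wmax=6600.)
    return ol, selection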
class RegionList(List):
"""
"""
def __init__(self, **kwargs):
"""
Class constructor
:return:None
"""
# setup the parent class
super(RegionList, self).__init__(**kwargs)
# registered keywords
self._registered_records = ['components', 'groups', 'wmin', 'wmax']
# if not given along the class a blank one is created
if len(self.mainList.keys()) < 1:
self.mainList = {}
self._registered_regions = []
self._user_defined_groups = {}
else:
self._registered_regions = self.get_registered_regions()
def __str__(self):
"""
String representation of the class.
:return: string
"""
string = ''
# go over regions
for key0 in self.mainList.keys():
# region properties
string += "Region name: %s: (wmin, wmax) = (%s, %s):\n" % (key0, str(self.mainList[key0]['wmin']),
str(self.mainList[key0]['wmax']))
# component properties
for i in range(0, len(self.mainList[key0]['components'])):
string += "%s: %s " % ('component', str(self.mainList[key0]['components'][i]))
string += "%s: %s " % ('groups', str(self.mainList[key0]['groups'][i]))
string += '\n'
return string
def add_region(self, component='all', identification=None, wmin=None, wmax=None, groups=None):
"""
:param component: component for which the region applies
:param identification
:param wmin: minimal wavelength
:param wmax: maximal wavelength
:param groups: group numbers for this region
:return: None
"""
# if we are crazy and want to set this up
# either by wavelength or by identification
if (wmin is None or wmax is None) and identification is None:
raise ValueError('Boundaries are not set properly: (wmin,wmax)= (%s, %s)' % (str(wmin), str(wmax)))
else:
if (wmin >= wmax) and identification not in self._registered_regions:
raise ValueError('wmin is greater than wmax: %s > %s '
'or the region: %s is not registered.' % (str(wmin), str(wmax), identification))
# convert component/group/identification keys to lowercase
if groups is not None:
groups = keys_to_lowercase(groups)
else:
groups = {}
# make component case insensitive
component = component.lower()
ident = identification
if ident is not None:
ident = ident.lower()
# maybe the region has been already defined
if ident in self.mainList.keys():
region = ident
elif ident is None:
region = self.get_region(wmin, wmax)
else:
region = None
# if the region already exists and the component is 'all',
# there is no point in attaching it
# print region, component
if (region is not None) and (component == 'all'):
warnings.warn('The region already exists as region: %s -> doing nothing.' % region)
return
# if it is not empty
if region is not None:
if self.debug:
print("Adding component: %s to region: %s" % (component, region))
# check that the component was not set earlier
if self.has_component(region, component):
warnings.warn('The component: %s is already set for region: %s. -> doing nothing.'
% (component, region))
return
# get lr from the region first record
# print groups, self.mainList[region]['groups']
groups['lr'] = self.mainList[region]['groups'][0]['lr']
self.read_user_defined_groups(groups)
# store everything apart from the wmin, wmax
self.mainList[region]['groups'].append(groups)
self.mainList[region]['components'].append(component)
# readout user-defined groups
self.read_user_defined_groups(groups)
else:
# setup identification for the new region
if ident is None:
ident = 'region' + str(len(self._registered_regions)).zfill(2)
if self.debug:
print("Creating new region: %s." % ident)
# register the new region
self.mainList[ident] = dict(wmin=wmin, wmax=wmax, components=[component], groups=[])
self._registered_regions.append(ident)
# if the luminosity group is not defined
if 'lr' not in groups.keys():
all_groups = self.get_defined_groups()
if 'lr' in all_groups.keys():
def_groups = all_groups['lr']
else:
def_groups = []
gn = 0
while gn in def_groups:
gn += 1
groups['lr'] = gn
# add groups to the list
self.mainList[ident]['groups'].append(groups)
# readout user-defined groups
self.read_user_defined_groups(groups)
self.setup_undefined_groups()
def clear_all(self):
"""
Clears the class.
:return:
"""
super(RegionList, self).clear_all()
self._registered_regions = []
self._user_defined_groups = {}
def get_defined_groups(self):
"""
Returns plain list of all defined groups regardless of their components.
:return: list of defined groups
"""
groups = {}
for reg in self._registered_regions:
for rec in self.mainList[reg]['groups']:
for key in rec.keys():
if key not in groups.keys():
groups[key] = [rec[key]]
else:
if rec[key] not in groups[key]:
groups[key].append(rec[key])
return groups
def get_region(self, wmin, wmax):
"""
Checks that a region with this wavelength range
does not exist.
:param wmin
:param wmax
:return:
"""
for region in self.mainList:
if (abs(self.mainList[region]['wmin'] - wmin) < ZERO_TOLERANCE) & \
(abs(self.mainList[region]['wmax'] - wmax) < ZERO_TOLERANCE):
return region
return None
def get_region_groups(self):
"""
A dictionary of groups defined for regions component by component.
:return: dictionary containing records on groups
which can be directly passed to type StarList
through set_groups
"""
groups = {}
# go over each region
for reg in self.mainList.keys():
for i in range(0, len(self.mainList[reg]['components'])):
component = self.mainList[reg]['components'][i]
comp_groups = self.mainList[reg]['groups'][i]
# setup component
if component not in groups.keys():
groups[component] = {}
# setup keys
for key in comp_groups.keys():
if key not in groups[component].keys():
groups[component][key] = [comp_groups[key]]
else:
if comp_groups[key] not in groups[component][key]:
groups[component][key].append(comp_groups[key])
return groups
def get_registered_regions(self):
"""
Returns an array of registered regions.
:return:
"""
return self.mainList.keys()
def get_wavelengths(self, verbose=False):
"""
Returns registered wavelengths
:param verbose
:return: wmins, wmaxs = arrays of minimal/maximal wavelength for each region
"""
wmins = []
wmaxs = []
regs = []
for reg in self.mainList.keys():
wmins.append(self.mainList[reg]['wmin'])
wmaxs.append(self.mainList[reg]['wmax'])
regs.append(reg)
if verbose:
return wmins, wmaxs, regs
else:
return wmins, wmaxs
def get_regions_from_obs(self, ol, append=False):
"""
Reads the region from a list of observations. In general this
function should not be used for fitting, because it
makes no sense to fit the whole spectrum.
:param ol: list of ObservedSpectrum
:param append are we appending to existing list?
:return: list of unique limits
"""
if len(ol) == 0:
raise ValueError('Cannot setup regions from observed spectra, because'
' their list is empty!')
# clear the regions if needed
if not append:
self.clear_all()
# empty arrays for limits
limits = {}
# the rounding is there to get over stupid problems with float precision
for obs in ol:
component = obs.component
if component not in limits:
limits[component] = [[], []]
limits[component][0].append(np.ceil(obs.wmin))
limits[component][1].append(np.floor(obs.wmax))
# get only unique values
for i in range(0, 2):
limits[component][i] = np.unique(limits[component][i])
# check that something funny did not happen
for component in limits.keys():
if len(limits[component][0]) != len(limits[component][1]):
raise ValueError('The limits were not read out correctly from observed spectra.')
# setup the regions
for i in range(0, len(limits[component][0])):
self.add_region(component=component,
wmin=limits[component][0][i],
wmax=limits[component][1][i])
return limits
def has_component(self, region, component):
"""
Checks that certain component was attached for a given
region.
:param region:
:param component:
:return: bool has/has_not the component
"""
for regcomp in self.mainList[region]['components']:
if (regcomp == component) or (regcomp == 'all'):
return True
return False
def load(self, f):
"""
Loads the text representation of the class from
a file f.
:param f
:return:
"""
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('REGIONLIST') > -1:
data_start = i
break
# check that there are actually some data in the file
# if not we failed
if data_start >= len(lines):
return False
# create a regionlist
rl = RegionList()
# from here the file is actually being read
for i, l in enumerate(lines[data_start+1:]):
# once we reach regionlist, we end
if l.find('REGIONLIST') > -1:
break
# split the line
d = l.split()
# print d
if d[0].find('identification') > -1:
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
# print cdict
# cast the parameters to the correct types
parnames = ['wmin', 'wmax', 'identification', 'component']
cast_types = [float, float, str, str]
for k in cdict.keys():
if k in parnames:
i = parnames.index(k)
cdict[k] = cast_types[i](cdict[k])
else:
# the remaining must be groups
cdict[k] = int(cdict[k])
# add the parameter if it does not exist
groups = {key: cdict[key] for key in cdict.keys() if key not in parnames}
kwargs = {key: cdict[key] for key in cdict.keys() if key in parnames}
# print groups
# # print kwargs
rl.add_region(groups=groups, **kwargs)
# do the same for enviromental keys
if d[0].find('env_keys') > -1:
# the first string is just identification
d = d[1:]
# secure correct types
recs = ['debug']
cast_types = [string2bool]
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
for k in cdict.keys():
if k in recs:
i = recs.index(k)
ctype = cast_types[i]
cdict[k] = ctype(cdict[k])
# assign the values
setattr(rl, k, cdict[k])
# finally assign everything to self
attrs = ['_registered_records', '_registered_regions', '_user_defined_groups',
'mainList', 'debug']
for attr in attrs:
setattr(self, attr, getattr(rl, attr))
# if we got here, we loaded the data
return True
def read_user_defined_groups(self, groups):
"""
When adding new region, all user defined groups
are read out to properly set the default groups
:param groups: groups to be read
:return: None
"""
for key in groups.keys():
if key not in self._user_defined_groups.keys():
self._user_defined_groups[key] = [groups[key]]
else:
if groups[key] not in self._user_defined_groups[key]:
self._user_defined_groups[key].append(groups[key])
def save(self, ofile):
"""
Saves the class. It should be retrievable from the file.
:param ofile:
:return:
"""
# Open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w+')
# parameters listed for each record in the RegionList
enviromental_keys = ['debug']
string = ' REGIONLIST '.rjust(105, '#').ljust(200, '#') + '\n'
for ident in self.mainList.keys():
for i, c in enumerate(self.mainList[ident]['components']):
string += 'identification: %s ' % ident
# write the wavelengths
for lkey in ['wmin', 'wmax']:
string += '%s: %s ' % (lkey, str(self.mainList[ident][lkey]))
# write components
string += "component: %s " % c
# and groups
for gkey in self.mainList[ident]['groups'][i].keys():
string += "%s: %s " % (gkey, str(self.mainList[ident]['groups'][i][gkey]))
string += '\n'
# setup additional parameters
string += 'env_keys: '
for ekey in enviromental_keys:
string += '%s: %s ' % (ekey, str(getattr(self, ekey)))
string += '\n'
string += ' REGIONLIST '.rjust(105, '#').ljust(200, '#') + '\n'
# write the remaining parameters
ofile.writelines(string)
def setup_undefined_groups(self):
"""
User can be a bit lazy. If we split some parameter
into more groups, we can only set group for few
and the remaining dataset gets a default one.
This nonetheless has to be run after all
regions were attached. If we do this
earlier, we will get into serious problems.
:return:
"""
# defined groups
groups = self.get_defined_groups()
# setup default group numbers for region->component
# with unset group
for region in self._registered_regions:
for i, comp_group in enumerate(self.mainList[region]['groups']):
# go over each defined group
for key in groups.keys():
# if the key is unset for the component
# we have to assign some. This must
# not be one of the user-defined.
# That is why we maintain dictionary
# of user defined groups.
if key not in comp_group.keys():
gn = 0
while gn in self._user_defined_groups[key]:
gn += 1
self.mainList[region]['groups'][i][key] = gn
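# --------------------------------------------------------------------------- #
# Illustrative usage sketch (not part of the original API): defining two spectral
# regions, one of them with a user-defined luminosity-ratio ('lr') group. The
# identifications and wavelength limits are hypothetical placeholders.
def _example_region_list():
    rl = RegionList()
    rl.add_region(identification='halpha', wmin=6500., wmax=6600.)
    rl.add_region(identification='hbeta', component='primary', wmin=4800., wmax=4900., groups=dict(lr=1))
    # the returned dictionary can be passed directly to StarList.set_groups
    return rl, rl.get_region_groups()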
class StarList(object):
"""
"""
def __init__(self, debug=False):
"""
"""
# set up debug mode
self.debug = debug
# define empty list of components
self.componentList = {}
# array storing registered components
self._registered_components = []
# defined groups
self.groups = {}
# empty dictionary for the list of
# fitted types
self.fitted_types = {}
def __len__(self):
"""
Returns number of parameters.
:return: l
"""
# total number of Parameter instances over all components and parameter types
return sum(len(self.componentList[c][p]) for c in self.componentList for p in self.componentList[c])
def __str__(self):
"""
:return: string = string representation of the class
"""
string = ''
for component in self.componentList.keys():
string += "Component: %s\n" % component
for parkey in self.componentList[component].keys():
for par in self.componentList[component][parkey]:
string += str(par)
return string
def add_component(self, component=None, groups={}, use_defaults=True, **kwargs):
"""
Sets up a component - if no kwargs are given,
all parameters from the parameter_definitions
are taken.
If one wants to exclude a parameter,
parameter=None has to be passed. If one
wants to add a parameter that is not
defined in parameter definitions, just
pass parameter + value.
:param component: Registration string of the component
if None is given, it is registred as 'componentXX'
:param groups: group set to all parameters of a component
:param use_defaults
:param kwargs:
:return:
"""
# setup name of the component and create a record within
# component list
if component is None:
component = 'component' + str(len(self._registered_components))
# register the component
self._registered_components.append(component)
# the parameters will be stored in a dictionary
self.componentList[component] = dict()
pd = copy.deepcopy(parameter_definitions)
# setup groups for default parameters
for key in groups.keys():
if key in pd.keys():
pd[key]['group'] = groups[key]
# process the keyword-arguments
for key in kwargs.keys():
keytest = key.lower()
# if we pass par + value, it is just stored
if keytest in pd.keys() and kwargs[key] is not None:
self.componentList[component][keytest] = []
self.componentList[component][keytest].append(Parameter(**pd[key]))
self.componentList[component][keytest][-1]['value'] = kwargs[key]
elif kwargs[key] is None:
warnings.warn('The parameter %s is set to %s. Therefore it is not '
'included into component parameters.' % (key, str(kwargs[key])))
elif keytest not in pd.keys() and kwargs[key] is not None:
# set up group
if keytest in groups.keys():
group = groups[keytest]
self.componentList[component][keytest] = []
self.componentList[component][keytest].append(Parameter(name=key, value=kwargs[key], group=group))
self.componentList[component][keytest][-1].set_empty()
warnings.warn('The parameter %s: %s is not set among the '
              'parameter definitions. Therefore you should pay '
              'attention to its settings.' % (key, str(kwargs[key])))
# pass all unset parameters in definitions
if use_defaults:
for key in pd.keys():
if key not in self.componentList[component].keys():
self.componentList[component][key] = []
self.componentList[component][key].append(Parameter(**pd[key]))
# readout the groups
self.read_groups()
self.get_fitted_types()
def add_parameter_to_component(self, component, p=None, **kwargs):
"""
Adds a parameter to a specific component.
:param component: component for which we want to add a parameter
:param p: assigning directly the Parameter type
:param kwargs: see Parameter class for description
:return:
"""
if p is None:
self.componentList[component][kwargs['name']] = []
self.componentList[component][kwargs['name']].append(Parameter(**kwargs))
else:
# print p['name']
self.componentList[component][p['name']].append(copy.deepcopy(p))
# redefine groups
self.read_groups()
self.get_fitted_types()
def add_parameter_to_all(self, **kwargs):
"""
Adds a parameter to all components
:param kwargs: see Parameter class
:return: None
"""
for component in self._registered_components:
self.add_parameter_to_component(component, **kwargs)
def clear(self):
"""
Clears the component list
:return: None
"""
self.componentList = {}
self._registered_components = []
def clone_parameter(self, component, parameter, index=0, **kwargs):
"""
Clones a parameter and stores it for a given component.
This function will be primarily used to clone parameters
to account for different groups.
:param component: component for which we want to clone the parameter
:param parameter: the cloned parameter
:param index : the specific cloned parameter
:param kwargs: values we want to change for the parameter
:return: clone type_Parameter - the cloned parameter
"""
# in case we pass
if component.lower() == 'all':
components = self._registered_components
else:
components = [component]
clones = []
# go over each component
for component in components:
# copy the parameter
clone = copy.deepcopy(self.componentList[component][parameter][index])
clones.append(clone)
# adjust its values
for key in kwargs.keys():
keytest = key.lower()
clone[keytest] = kwargs[key]
# append the new component to the componentlist
self.add_parameter_to_component(component, p=clone)
return clones
def copy(self):
"""
Creates a deepcopy of the class StarList.
:return:
"""
other = StarList()
for attr in ['_registered_components', 'componentList', 'debug',
'fitted_types', 'groups']:
v = getattr(self, attr)
setattr(other, attr, copy.deepcopy(v))
return other
def delete_hollow_groups(self):
"""
Goes through parameters and deletes those that
are set to None.
:return: None
"""
for component in self._registered_components:
for parkey in self.componentList[component].keys():
i = 0
while i < len(self.componentList[component][parkey]):
# if the parameter group is not, it is deleted
if self.componentList[component][parkey][i]['group'] is None:
del self.componentList[component][parkey][i]
else:
i += 1
def delete_duplicities(self):
"""
Delete duplicities in groups.
:return: None
"""
for component in self._registered_components:
# groups can happen to be the same for two components, of course
def_groups = []
for parkey in self.componentList[component].keys():
i = 0
while i < len(self.componentList[component][parkey]):
if self.componentList[component][parkey][i]['group'] not in def_groups:
def_groups.append(self.componentList[component][parkey][i]['group'])
i += 1
# if the parameter with the group has been already defined, delete it
else:
del self.componentList[component][parkey][i]
def get_common_groups(self):
"""
Returns a dictionary of groups shared by all components.
:return: com_groups
"""
# get the keys of physical parameters
parkeys = self.get_physical_parameters()
# get the groups
com_groups = {}
for key in parkeys:
com_groups[key] = []
# define the reference component
comp0 = self._registered_components[0]
# groups are always common for one parameter
if len(self._registered_components) < 2:
is_common = True
# go over each group of the reference component
for i in range(0, len(self.componentList[comp0][key])):
refpar = self.componentList[comp0][key][i]
# print refpar
# at the beginning
for component in self._registered_components[1:]:
is_common = False
for j, par in enumerate(self.componentList[component][key]):
# print par
if refpar['group'] == par['group']:
is_common = True
break
if not is_common:
break
if is_common:
com_groups[key].append(refpar['group'])
return com_groups
def get_components(self):
"""
Returns list of all defined components.
:return:
"""
return copy.deepcopy(self._registered_components)
def get_defined_groups(self, component=None, parameter=None):
""":
:param component: starlist component
:param parameter: physical parameter
:return: dictionary of groups
"""
groups = {}
# setup parameters
if parameter is None:
parameters = self.get_physical_parameters()
else:
parameters = [parameter]
# setup components
if component is None or component == 'all':
components = self.get_components()
else:
components = [component]
# go over the registered components
for comp in components:
groups[comp]= {}
# go over passed parameters
for param in parameters:
groups[comp][param] = []
for regparam in self.componentList[comp][param]:
if regparam.name == param:
groups[comp][param].append(regparam.group)
# merge groups if component was 'all'
if component == 'all':
    # build the merged record once, then fill it parameter by parameter
    groups[component] = {}
    for p in parameters:
        temp = []
        for c in components:
            # print flatten_2d(groups[c][p])
            temp.extend(groups[c][p])
        groups[component][p] = np.unique(temp).tolist()
return groups
def get_fitted_parameters(self, verbose=False):
"""
Returns a list of fitted parameters wrapped within the Parameter class.
:param verbose - return a dictionary with additional info on the
fitted parameters.
:return:
"""
fit_pars = []
# info on the fitted parameters
# is stored in a list and passed if
# necessary
if verbose:
fit_pars_info = {'component': [], 'group': [], 'name': [], 'value': []}
# go over all parameters and components
for c in self._registered_components:
for parname in self.get_physical_parameters():
for par in self.componentList[c][parname]:
if par['fitted']:
fit_pars.append(par)
if verbose:
for k in fit_pars_info.keys():
if k != 'component':
fit_pars_info[k].append(par[k])
else:
fit_pars_info[k].append(c)
if not verbose:
return fit_pars
else:
return fit_pars, fit_pars_info
def get_fitted_types(self):
"""
Stores a dictionary of fitted types for
each component in the class. This should
be updated whenever a parameter is changed.
:return:
"""
fitted_types = {}
# go over each component
for c in self.componentList.keys():
fitted_types[c] = []
# go over each parameter type
for parname in self.componentList[c]:
# and finaly over each parameter
for par in self.componentList[c][parname]:
if parname not in fitted_types[c]:
if par['fitted']:
fitted_types[c].append(parname)
else:
break
# print fitted_types
self.fitted_types = fitted_types
def get_index(self, component, parameter, group):
"""
Returns index of a component/parameter/group.
:param component:
:param parameter:
:param group:
:return:
"""
for i, par in enumerate(self.componentList[component][parameter]):
if par['group'] == group:
return i
warnings.warn('Component: %s Parameter: %s Group: %s'
' not found.' % (component, parameter, str(group)))
return None
def get_parameter_types(self):
"""
Returns a list of all parameter names
:return:
"""
partypes = []
# go over each component and parameter
for c in self._registered_components:
for p in self.componentList[c].keys():
if p not in partypes:
partypes.append(p)
return partypes
def get_parameter(self, **kwargs):
"""
Returns all parameters, which have certain group.
:param kwargs:
:return:
"""
pars = {x: [] for x in self._registered_components}
for key in kwargs.keys():
for c in self._registered_components:
for i, par in enumerate(self.componentList[c][key]):
# print i, par
if par.group == kwargs[key]:
pars[c].append(self.componentList[c][key][i])
return pars
def get_physical_parameters(self):
"""
Reads physical parameters from the starlist.
:return:
"""
pars = []
for c in self._registered_components:
pars.extend(self.componentList[c].keys())
return np.unique(pars)
def list_parameters(self):
"""
Returns a list of all parameters.
:return:
"""
# empty output structure
return copy.deepcopy(self.componentList)
def load(self, f):
"""
Loads the text representation of the class from
a file f.
:param f
:return:
"""
# read the file
lines = read_text_file(f)
data_start = len(lines)
for i, l in enumerate(lines):
if l.find('STARLIST') > -1:
data_start = i
break
# check that there are actually some data in the file
if data_start >= len(lines):
return False
# create a StarList
sl = StarList()
# from here the file is actually being read
for i, l in enumerate(lines[data_start+1:]):
# once we reach starlist again, we end
if l.find('STARLIST') > -1:
break
d = l.split()
if d[0].find('component') > -1:
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
# cast the parameters to the correct types
for k in cdict.keys():
if k in ['value', 'vmin', 'vmax']:
cdict[k] = float(cdict[k])
elif k in ['group']:
cdict[k] = int(cdict[k])
elif k in ['fitted']:
cdict[k] = string2bool(cdict[k])
# add the parameter if it does not exist
c = cdict['component']
p = cdict['parameter']
if c not in sl.componentList.keys():
sl.componentList[c] = {}
sl._registered_components.append(c)
if cdict['parameter'] not in sl.componentList[c].keys():
sl.componentList[c][p] = []
# transform the array to Parameter classs
pdict = {key: cdict[key] for key in cdict.keys() if key not in ['parameter', 'component']}
pdict['name'] = p
# add the parameter to the class
par = Parameter(**pdict)
sl.add_parameter_to_component(component=c, p=par)
# do the same for enviromental keys
if d[0].find('env_keys') > -1:
# the first string is just identification
d = d[1:]
# secure correct types
recs = ['debug']
cast_types = [string2bool]
cdict = {d[i].rstrip(':'): d[i+1] for i in range(0, len(d), 2)}
for k in cdict.keys():
if k in recs:
i = recs.index(k)
ctype = cast_types[i]
cdict[k] = ctype(cdict[k])
# assign the values
setattr(sl, k, cdict[k])
# finally assign everything to self
attrs = ['_registered_components', 'componentList', 'debug',
'fitted_types', 'groups']
for attr in attrs:
setattr(self, attr, getattr(sl, attr))
# if we got here, we loaded the data
return True
def read_groups(self):
"""
Reads all groups from the defined components. This
is then compared to the list obtained from observations
and defined regions,
:return:
"""
for component in self.componentList.keys():
self.groups[component] = dict()
for key in self.componentList[component].keys():
self.groups[component][key] = []
for par in self.componentList[component][key]:
self.groups[component][key].append(par['group'])
def remove_parameter(self, component, parameter, group):
"""
:param component: component for which the parameter is deleted
:param parameter: deleted parameter
:param group
:return:
"""
index = self.get_index(component, parameter, group)
del self.componentList[component][parameter][index]
def reset(self, parameters='all'):
"""
Leaves only one parameter per type and component.
:param parameters: list of reset parameters
:return:
"""
# cycle over components
for c in self._registered_components:
# select all parameters
if parameters == 'all':
reset_params = self.componentList[c].keys()
else:
reset_params = parameters
# cycle over reset parameters
for p in reset_params:
self.componentList[c][p] = [self.componentList[c][p][0]]
self.groups[c][p] = [self.groups[c][p][0]]
def save(self, ofile):
"""
Saves the class. It should be retrievable from the file.
:param ofile:
:return:
"""
# Open the file
if isinstance(ofile, str):
ofile = open(ofile, 'w+')
# parameters listed for each record in the starlist
listed_keys = ['value', 'unit', 'fitted', 'vmin', 'vmax', 'group']
string = ' STARLIST '.rjust(105, '#').ljust(200, '#') + '\n'
for c in self.componentList.keys():
for key in self.componentList[c].keys():
for par in self.componentList[c][key]:
string += 'component: %s ' % c
string += 'parameter: %s ' % key
for lkey in listed_keys:
string += '%s: %s ' % (lkey, str(par[lkey]))
string += '\n'
# setup additional parameters
enviromental_keys = ['debug']
string += 'env_keys: '
for ekey in enviromental_keys:
string += '%s: %s ' % (ekey, str(getattr(self, ekey)))
string += '\n'
string += ' STARLIST '.rjust(105, '#').ljust(200, '#') + '\n'
# write the remaining parameters
ofile.writelines(string)
def set_groups(self, groups, overwrite=False):
"""
Sets up groups - this function is designed to
use output from ObservedList.get_groups().
It is assumed that the structure is following:
dict(component01=dict(par1=[], par2=[]), component2=..)
This function should primarily be used
to assign rv_groups, where cloning
is necessary to keep the bookkeeping sane.
This function merges the groups defined
in the class and the ones passed. In general
we should not be able to do this.
:param overwrite
:param groups
:return: None
"""
for component in groups.keys():
for parkey in groups[component].keys():
# bool variable for case, when we want to completely overwrite
# previous settings
first_in_list = True
for group in groups[component][parkey]:
# setting group for all components
if component.lower() == 'all':
for one_comp in self._registered_components:
# print one_comp, parkey, self.groups
if group not in self.groups[one_comp][parkey]:
warnings.warn("Group %s: %s previously undefined. "
              "Adding to the remaining groups." % (parkey, str(group)))
# print one_comp, parkey, group
self.clone_parameter(one_comp, parkey, group=group)
# deletes all previous groups
if overwrite and first_in_list:
while len(self.groups[one_comp][parkey]) > 1:
del self.groups[one_comp][parkey][0]
first_in_list = False
# if we are setting group only for one component
else:
if group not in self.groups[component][parkey]:
warnings.warn("Group %s: %s previously undefined. "
              "Adding to the remaining groups." % (parkey, str(group)))
self.clone_parameter(component, parkey, group=group)
# deletes all previous groups
if overwrite and first_in_list:
while len(self.groups[component][parkey]) > 1:
    del self.groups[component][parkey][0]
first_in_list = False
def set_parameter(self, name, component, group, **kwargs):
"""
Sets values defined in kwargs for a parameter
of a given component and group.
:param name:
:param component:
:param group:
:param kwargs
:return:
"""
# print name, component, group, kwargs
name = name.lower()
if name not in self.get_physical_parameters():
raise Exception("Parameter: %s unknown." % name)
elif component not in self._registered_components:
# print self._registered_components, component
raise Exception("Component: %s unknown" % component)
elif group not in self.get_defined_groups(component, name)[component][name]:
raise Exception("Group \"%i\" was not defined for component \"%s\" and parameter \"%s\"!" %
(group, component, name))
else:
for i, par in enumerate(self.componentList[component][name]):
if par['name'] == name and par['group'] == group:
for key in kwargs.keys():
keytest = key.lower()
# print name, component, keytest, kwargs[key]
self.componentList[component][name][i][keytest] = kwargs[key]
# print self
# update the list of fitted types
self.get_fitted_types()
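# --------------------------------------------------------------------------- #
# Illustrative usage sketch (not part of the original API): building a StarList
# with two components. It assumes the standard parameter_definitions of this
# module (teff, logg, z, vrot, rv, lr); all numerical values are placeholders.
def _example_star_list():
    sl = StarList()
    sl.add_component(component='primary', teff=25000., logg=4.0, z=1.0, vrot=150., rv=0., lr=0.7)
    sl.add_component(component='secondary', teff=18000., logg=4.0, z=1.0, vrot=80., rv=0., lr=0.3)
    # clone the rv parameter of both components for a second 'rv' group (e.g. another epoch)
    sl.clone_parameter('all', 'rv', group=1)
    # fix the rotational velocity of the primary in its default group
    g = sl.get_defined_groups('primary', 'vrot')['primary']['vrot'][0]
    sl.set_parameter('vrot', 'primary', g, fitted=False)
    return sl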
class SyntheticList(List):
"""
List of resulting synthetic spectra.
"""
def __init__(self, **kwargs):
# initialize the parent
super(SyntheticList, self).__init__(**kwargs)
| 151,519 | 35.127802 | 120 | py |
pyterpol | pyterpol-master/synthetic/auxiliary.py | import numpy as np
import matplotlib.pyplot as plt
from astropy.constants import c
from scipy.interpolate import splrep
from scipy.interpolate import splev
from scipy.interpolate import bisplrep
from scipy.interpolate import bisplev
from scipy.interpolate import RectBivariateSpline
from scipy.interpolate import InterpolatedUnivariateSpline
# from scipy.interpolate import spline  # unused here; not available in recent SciPy versions
from scipy.signal import fftconvolve
ZERO_TOLERANCE = 1e-6
def flatten_2d(arr):
"""
Flattens 2-dim array
:param arr: 2d array
:return:
"""
newarr = []
if any([isinstance(subarr, (list, tuple)) for subarr in arr]):
for subarr in arr:
if isinstance(subarr, (tuple, list)):
newarr.extend(subarr)
else:
newarr.append(subarr)
return newarr
else:
return arr
def instrumental_broadening(wave, flux, width=0.25, width_type='fwhm', interpolate_back=True):
"""
A convolution of a spectrum with a normal distribution.
:param wave: wavelength array
:param flux: intensity array
:param width: width of the instrumental profile
:param width_type: 'fwhm' or 'sigma'
:return:
"""
# print "Computing instr. broadening."
# If there is no broadening to apply, don't bother
if width < ZERO_TOLERANCE:
return flux
# Convert user input width type to sigma (standard deviation)
width_type = width_type.lower()
if width_type == 'fwhm':
sigma = width / 2.3548
elif width_type == 'sigma':
sigma = width
else:
raise ValueError(("Unrecognised width_type='{}' (must be one of 'fwhm'"
"or 'sigma')").format(width_type))
# Make sure the wavelength range is equidistant before applying the
# convolution
delta_wave = np.diff(wave).min()
range_wave = wave.ptp()
n_wave = int(range_wave / delta_wave) + 1
wave_ = np.linspace(wave[0], wave[-1], n_wave)
# flux_ = np.interp(wave_, wave, flux)
flux_ = interpolate_spec(wave, flux, wave_)
dwave = wave_[1] - wave_[0]
n_kernel = int(2 * 4 * sigma / dwave)
# The kernel might be of too low resolution, or the wavelength range
# might be too narrow. In both cases, raise an appropriate error
if n_kernel == 0:
raise ValueError(("Spectrum resolution too low for "
"instrumental broadening (delta_wave={}, "
"width={}").format(delta_wave, width))
elif n_kernel > n_wave:
raise ValueError(("Spectrum range too narrow for "
"instrumental broadening"))
# Construct the broadening kernel
wave_k = np.arange(n_kernel) * dwave
wave_k -= wave_k[-1] / 2.
kernel = np.exp(- (wave_k) ** 2 / (2 * sigma ** 2))
kernel /= sum(kernel)
# Convolve the flux with the kernel
flux_conv = fftconvolve(1 - flux_, kernel, mode='same')
# And interpolate the results back on to the original wavelength array,
# taking care of even vs. odd-length kernels
if n_kernel % 2 == 1:
offset = 0.0
else:
offset = dwave / 2.0
if interpolate_back:
flux = np.interp(wave + offset, wave_, 1 - flux_conv, left=1, right=1)
# flux = interpolate_spec(wave_, 1-flux_conv, wave+offset)
# Return the results.
return flux
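# A hedged, self-contained example of instrumental_broadening (illustrative only):
# a synthetic Gaussian absorption line convolved with an instrumental profile of
# FWHM = 0.25 Angstrom. The line centre, depth and sampling are arbitrary choices.
def _example_instrumental_broadening():
    wave = np.linspace(6540., 6580., 4001)
    intens = 1.0 - 0.5 * np.exp(-0.5 * ((wave - 6562.8) / 0.3) ** 2)
    return instrumental_broadening(wave, intens, width=0.25, width_type='fwhm')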
def interpolate_block(x, block, xnew):
"""
Interpolates in each line of a 2d array.
:param x: independent variable
:type x: numpy.float64
:param block: 2d array for each column f(x)= block[i]
:type block: numpy.float64
:param xnew: point at which it is interpolated
:type xnew: float
:return:
"""
intens = np.zeros(len(block[0]))
n = len(block[:, 0])
# set up the order of interpolation
if n > 4:
k = 3
else:
k = n - 1
# k=3
# TODO Can this be done faster with bisplrep and bisplev
# do the interpolation
for i in range(0, len(block[0])):
y = block[:, i]
tck = splrep(x, y, k=k)
intens[i] = splev(xnew, tck, der=0)
return intens
def interpolate_block_faster(x, block, xnew):
"""
Interpolation of the spectra... hopefully faster?
:param x:
:param block:
:param xnew:
:return:
"""
# length of the datablock
nx = len(block[0])
ny = len(x)
# print x
if (ny > 3) & (ny < 6):
ky = 3
elif ny > 5:
ky = 5
else:
ky = ny - 1
# print ky
f = RectBivariateSpline(x, np.arange(nx), block, kx=ky, ky=1)
intens = f(xnew, np.arange(nx))[0]
return intens
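# A hedged example of interpolate_block_faster (illustrative only): rows of 'block'
# correspond to grid nodes in one parameter (here a dummy temperature axis), columns
# to wavelength points; the call interpolates every column at an intermediate node.
def _example_interpolate_block_faster():
    x = np.array([15000., 20000., 25000., 30000.])
    block = np.vstack([np.full(10, xi / 1.e4) for xi in x])
    return interpolate_block_faster(x, block, 22500.)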
def interpolate_spec(wave0, intens0, wave1):
"""
Defines a function intens0 = f(wave0) and
than interpolates in it at wave1.
:param wave0: initial wavelength array
:type wave0: numpy.float64
:param intens0: initial intensity array
:type intens0: numpy.float64
:param wave1: wavelength array at which we interpolate
:type wave1: numpy.float64
:return intens1: final intensity array
:rtype intens1: numpy.float64
"""
tck = splrep(wave0, intens0, k=3)
intens1 = splev(wave1, tck)
return intens1
def is_within_interval(v, arr):
"""
Tests whether value v lies within interval [min(arr); max(arr)]
:param v: tested values
:type v: numpy.float64
:param arr: tested array
:type v: numpy.float64
:return:
:param:
:type: bool
"""
# print v, max(arr), min(arr)
if (v - max(arr) > ZERO_TOLERANCE) | (min(arr) - v > ZERO_TOLERANCE):
return False
else:
return True
def generate_least_number(l):
"""
Goes over integer in list and finds the
smallest integer not in the list.
:param l: the list
:return: int the smallest integer
"""
num = 0
while num in l:
num += 1
return num
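# A tiny illustration: generate_least_number([0, 1, 3]) returns 2. This mirrors the
# way unused group numbers are picked for spectra and regions elsewhere in the package.
def _example_generate_least_number():
    return generate_least_number([0, 1, 3])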
def keys_to_lowercase(d):
"""
Converts dictionary keys to lowercase
:param d the converted dictionary
:return: dnew
"""
dnew = {}
for key in d.keys():
keynew = key.lower()
dnew[keynew] = d[key]
return dnew
def parlist_to_list(l, property='value'):
"""
Converts a list of Parameter class to a
regular list - only the property is returned
:param l:
:param prop:
:return:
"""
ol = []
for par in l:
ol.append(par[property])
return ol
def sum_dict_keys(d):
"""
Sums dictionary key records.
:param d: the dictionary
:return: s the sum
"""
s = 0.0
for key in d.keys():
s += d[key]
return s
def read_text_file(f):
"""
Reads ascii file f.
:param f: the file
:type f: str
:return lines: list of all lines within file f
:rtype: list
"""
ifile = open(f, 'r')
lines = ifile.readlines()
ifile.close()
return lines
def renew_file(f):
"""
Deletes an existing file.
:param f:
:return:
"""
ofile = open(f, 'w')
ofile.close()
def rotate_spectrum(wave, intens, vrot, epsilon=0.6, interpolate_back=True):
"""
Rotates a spectrum represented by arrays wave and intens to the projected
rotational velocity vrot.
:param wave: wavelength array
:type wave: numpy.float64
:param intens: intensity array
:type intens: numpy.float64
:param vrot: projected rotational velocity in km/s
:type vrot: float
:param epsilon: Coefficient of linear limb-darkening.
:type epsilon: float
:param interpolate_back: interpolate the spectrum back to the original wavelength sampling
:type interpolate_back: bool
:return intens: the rotated spectrum in the original wavelength sampling
:rtype intens: numpy.float64
:return intens_conv: the rotated spectrum equidistant in rv
:rtype intens_conv: numpy.float64
:return wave_conv: the wavelength array equidistant in rv
:rtype wave_conv: numpy.float64
"""
if vrot > ZERO_TOLERANCE:
# we need it equidistant in RV
wave_log = np.log(wave)
rv = np.linspace(wave_log[0], wave_log[-1], len(wave))
step = rv[1] - rv[0]
# interpolate
intens_rv = interpolate_spec(wave_log, intens, rv)
# scale rotational velocity with light speed
vrot = 1000 * vrot / c.value
# get the kernel
# velocity vector
n = int(np.ceil(2 * vrot / step))
rv_ker = np.arange(n) * step
rv_ker = rv_ker - rv_ker[-1] / 2.
y = 1 - (rv_ker / vrot) ** 2
# the kernel
kernel = (2 * (1 - epsilon) * np.sqrt(y) + np.pi * epsilon / 2. * y) / (np.pi * vrot * (1 - epsilon / 3.0))
kernel = kernel / kernel.sum()
# convolve the flux
intens_conv = fftconvolve(1 - intens_rv, kernel, mode='same')
if n % 2 == 1:
rv = np.arange(len(intens_conv)) * step + rv[0]
else:
rv = np.arange(len(intens_conv)) * step + rv[0] - step / 2.
wave_conv = np.exp(rv)
# interpolate back
if interpolate_back:
intens = interpolate_spec(wave_conv, 1 - intens_conv, wave)
return intens
else:
return 1 - intens_conv, wave_conv
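# A hedged example of rotate_spectrum (illustrative only): the same kind of synthetic
# Gaussian line as above, broadened by a linear limb-darkened rotational kernel for
# v sin i = 50 km/s. All numbers are placeholders.
def _example_rotate_spectrum(vrot=50.):
    wave = np.linspace(6540., 6580., 4001)
    intens = 1.0 - 0.5 * np.exp(-0.5 * ((wave - 6562.8) / 0.3) ** 2)
    return rotate_spectrum(wave, intens, vrot, epsilon=0.6)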
def shift_spectrum(wave, RV):
"""
Doppler-shifts spectrum.
:param wave: original wavelength array
:type wave: numpy.float64
:param RV: radial velocity in km/s
:type RV: float
:return new_wave: shifted wavelength array
:rtype new_wave: numpy.float64
"""
# shifts the wavelengths
new_wave = wave * (1 + RV * 1000 / c.value)
return new_wave
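# A quick numerical check of the Doppler relation used above, new_wave = wave * (1 + RV/c):
# for RV = +30 km/s a line at 6562.8 Angstrom moves by roughly +0.66 Angstrom.
def _example_shift_spectrum():
    wave = np.array([6562.8])
    return shift_spectrum(wave, 30.0) - wave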
def select_index_for_multiple_keywords(d, **kwargs):
"""
From a dictionary of lists selects
one index meeting all requirements.
:param kwargs:
:return:
"""
keys = list(d.keys())
length = len(d[keys[0]])
# the index is returned only if all queried keywords match at that position
for i in range(0, length):
    if all(d[k][i] == kwargs[k] for k in keys):
        return i
return -1
def string2bool(s):
"""
Converts string to boolean.
:param s:
:return:
"""
if s.lower() in ['true', '1']:
return True
else:
return False
def write_numpy(f, cols, fmt):
"""
A thin wrapper around np.savetxt.
:param f: output file or handler
:param cols: block of data to be written
:param fmt: format of the block
:return: None
"""
np.savetxt(f, cols, fmt=fmt)
| 10,363 | 24.033816 | 115 | py |
pyterpol | pyterpol-master/synthetic/makespectrum.py | import os
import sys
import copy
import warnings
import numpy as np
import matplotlib.pyplot as plt
from astropy.constants import c
from auxiliary import is_within_interval
from auxiliary import instrumental_broadening
from auxiliary import interpolate_spec
from auxiliary import interpolate_block_faster
from auxiliary import read_text_file
from auxiliary import rotate_spectrum
from auxiliary import shift_spectrum
from auxiliary import ZERO_TOLERANCE
from defaults import default_grid_order
from defaults import gridDirectory
from defaults import grid_files
from defaults import gridListFile
from defaults import ABS_default_grid_order
from defaults import ABS_gridDirectory
from defaults import ABS_grid_files
from defaults import ABS_gridListFile
# CONSTANTS
class SyntheticSpectrum:
def __init__(self, f=None, wave=None, intens=None, do_not_load=False, **props):
"""
Reads the synthetic spectrum and its properties.
input:
f.. file with the spectrum
wave.. wavelength vector of the synthetic spectrum
intens.. intensity vector of the synthetic spectrum
do_not_load.. switch for cases, when we want to build the
class but do not want to load the spectrum.
**props.. properties of the spectrum, in the correct type
"""
# reads the spectrum
if f is not None:
# from file
# this delay reading of the data
self.filename = f
if not do_not_load:
self.loaded = True
self.wave, self.intens = np.loadtxt(f, unpack=True, usecols=[0, 1])
self.measure_spectrum()
else:
self.loaded = False
else:
# wavelengths and intensities are given
self.wave = wave
self.intens = intens
self.measure_spectrum()
self.loaded = True
self.filename = None
# setups properties of the synthetic spectrum
self.properties = []
for key in props.keys():
setattr(self, key.lower(), props[key])
self.properties.append(key.lower())
def __getitem__(self, key):
"""
Returns an attribute of the synthetic spectrum.
Works only with properties.
input:
key.. searched attribute
output:
prop.. value of the attribute, if not present, False
"""
if not hasattr(self, key):
return False
else:
return getattr(self, key)
def __setitem__(self, key, value):
"""
Changes physical attribute. If it does not
exist, exception is raised.
input:
key.. the attribute to be changed
value.. new value of the attribute
"""
if not hasattr(self, key):
raise AttributeError('The attribute %s does not exist.' % key)
else:
setattr(self, key, value)
def __str__(self):
"""
String representation.
"""
string = ""
# if taken from file, prints its name
if self.filename is not None:
string = string + "filename:%s " % (self.filename)
string = string + "loaded:%s " % (str(self.loaded))
# prints properties of the spectrum
for prop in self.properties:
string = string + "%s:%s " % (prop, str(self[prop]))
# get the wavelength boundaries
if self.loaded:
string += "(wmin, wmax): (%s, %s)" % (str(self.wmin), str(self.wmax))
string = string + '\n'
return string
def check_boundaries(self, wmin, wmax):
"""
Checks that the given wavelength limits do not
extend beyond the synthetic spectrum.
input:
wmin = minimal wavelength
wmax = maximal wavelength
"""
# lets have a special case, where the boundaries are None
if wmin is None:
wmin = self.wmin
if wmax is None:
wmax = self.wmax
if (wmin - (self.wmin - self.step) < ZERO_TOLERANCE) | \
(wmax - (self.wmax + self.step) > ZERO_TOLERANCE):
return False
else:
return True
def keys(self):
"""
Returns a list of properties.
"""
return self.properties
def load_spectrum(self, f=None):
"""
Loads the spectrum and stores it within the type.
input:
f.. filename
"""
if f is not None:
self.filename = f
# check if a binary representation exists -- then load it
binary_file = self.filename + '.npz'
if os.path.isfile(binary_file):
npz = np.load(binary_file, mmap_mode='r')
self.wave = npz['arr_0']
self.intens = npz['arr_1']
# otherwise, load ascii (very slow!) and save it as binary
else:
self.wave, self.intens = np.loadtxt(self.filename, unpack=True, usecols=[0, 1])
print("Saving binary file: " + str(binary_file))
np.savez(binary_file, self.wave, self.intens)
# measures the spectrum and marks it as loaded
self.measure_spectrum()
self.loaded = True
def measure_spectrum(self):
"""
Stores maximal, minimal wavelength and step within the type.
"""
# saves properties of synthetic
# spectra - min, max, step
self.wmin = self.wave.min()
self.wmax = self.wave.max()
self.step = self.wave[1] - self.wave[0]
def pad_continuum(self, wave, intens, bumpsize):
"""
Pads synthetic spectrum with continua at
each end.
input:
wave, intens.. the input spectrum
output:
bump_wave, bump_intens.. the 1-padded spectrum
"""
# gets properties of the spectrum
w0 = wave[0]
wn = wave[-1]
step = wave[1] - wave[0]
# left bump
l_bump_wave = np.arange(w0 - bumpsize, w0, step)
# right bump
r_bump_wave = np.arange(wn + step, wn + bumpsize, step)
# continuum - just ones
# l_cont = np.ones(len(l_bump_wave))
# r_cont = np.ones(len(r_bump_wave))
# continuum - linear ramps towards the edge intensities
l_cont = 1.0 - np.linspace(0, bumpsize, len(l_bump_wave)) * (1.0 - intens[0]) / bumpsize
r_cont = intens[-1] + np.linspace(0, bumpsize, len(r_bump_wave)) * (1.0 - intens[-1]) / bumpsize
# creates empty arrays
total_length = len(l_bump_wave) + len(wave) + len(r_bump_wave)
bump_wave = np.zeros(total_length)
bump_intens = np.zeros(total_length)
# copy the bumpers and the spectra
imin = 0
imax = 0
for w, c in zip([l_bump_wave, wave, r_bump_wave], [l_cont, intens, r_cont]):
imax += len(w)
bump_wave[imin:imax] = w
bump_intens[imin:imax] = c
imin = imax
return bump_wave, bump_intens
def get_spectrum(self, wave=None, rv=None, vrot=None, lr=1.0, korel=False,
only_intensity=False, wmin=None, wmax=None, keep=False,
fwhm=None):
"""
Return the synthetic spectrum stored within the class. If
a set of wavelengths is provided, an interpolated spectrum
is returned.
input:
optional:
wave.. array of desired wavelengths
rv.. radial velocity in km/s
vrot.. projected rotational velocity in km/s
only_intensity.. returns intensity only
:param korel
:param keep
output:
wave, intens.. synthetic spectrum
"""
# checks that we do not pass negative values
if vrot is not None and vrot < 0.0:
warnings.warn('vrot cannot be negative! Setting to zero!')
vrot = 0.0
if wave is None:
# for some reason we want to work with the
# whole spectrum
# print wmin, wmax
if wmin is not None and wmax is not None:
wave, intens = self.select_interval(wmin, wmax)
else:
wave = self.wave
intens = self.intens
syn_wave = wave.copy()
# adds the instrumental broadening
if fwhm is not None and fwhm > ZERO_TOLERANCE:
intens = instrumental_broadening(syn_wave, intens, width=fwhm)
if vrot is not None and vrot > ZERO_TOLERANCE:
# rotates the spectrum
# print vrot
intens = rotate_spectrum(syn_wave, intens, vrot)
if rv is not None and abs(rv) > 0.0:
# if we want to shift it, we need to pad it,
# so it does not have to extrapolate
w0min = wave.min()
w0max = wave.max()
mins = np.array([w0min, w0max])
WAVE_BUMP = np.ceil(np.max(np.absolute(mins * (1 + 1000 * rv / c.value) - mins)))
syn_wave, intens = self.pad_continuum(syn_wave, intens, WAVE_BUMP)
# shift it in RV
syn_wave = shift_spectrum(syn_wave, rv)
            # the intensities are scaled by the relative luminosity
if lr is not None and abs(lr - 1.0) > ZERO_TOLERANCE:
intens = intens*lr
if np.any([x != None for x in [rv, vrot]]):
# interpolates back
intens = interpolate_spec(syn_wave, intens, wave)
else:
            # we are interpolating onto a user-supplied wavelength vector,
            # so we check the boundaries and extend the selected interval
            # a little at each end, because the spectrum may still be
            # shifted in RV before the final interpolation; if the grid
            # does not cover the extended interval, a warning is issued
w0min = wave.min()
w0max = wave.max()
# the velocity shift rounded up
mins = np.array([w0min, w0max])
            # Securing additional points at the spectrum edges makes
            # sense only if we plan to shift the spectrum in RV
if rv is not None and abs(rv) > ZERO_TOLERANCE:
WAVE_BUMP = np.ceil(np.max(np.absolute(mins * (1 + 1000 * rv / c.value) - mins)))
else:
WAVE_BUMP = 0.0
wmin = w0min - WAVE_BUMP
wmax = w0max + WAVE_BUMP
# print wmin, wmax, self.wmin, self.wmax
if not self.check_boundaries(wmin, wmax):
                warnings.warn('Synthetic spectra do not cover the whole wavelength region,'
                              ' so extrapolation has to be employed and THAT IS DANGEROUS! Note that'
                              ' each spectrum is extended by %f Angstrom at each side.' % (WAVE_BUMP))
# the part of the spectrum is selected
# there is no point in working with the
# whole dataset
# print wmin, wmax
# print len(self.wave)
syn_wave, intens = self.select_interval(wmin, wmax)
# print len(syn_wave), len(intens)
# adds the instrumental broadening
if fwhm is not None and fwhm > ZERO_TOLERANCE:
# intens = instrumental_broadening(syn_wave, intens, width=fwhm)
intens = instrumental_broadening(syn_wave, intens, width=fwhm)
# rotates the spectrum
if vrot is not None and vrot > ZERO_TOLERANCE:
# intens = rotate_spectrum(syn_wave, intens, vrot)
# print syn_wave
intens, syn_wave = rotate_spectrum(syn_wave, intens, vrot, interpolate_back=False)
# adjusts the spectrum for the radial velocity
if rv is not None and abs(rv) > ZERO_TOLERANCE:
syn_wave = shift_spectrum(syn_wave, rv)
            # the intensities are scaled by the relative luminosity
if lr is not None and abs(lr - 1.0) > ZERO_TOLERANCE:
intens = intens*lr
# print len(syn_wave), len(wave)
# interpolates to the user specified wavelengths
intens = interpolate_spec(syn_wave, intens, wave)
# if we want to extract the spectra in KOREL format
if korel:
intens = 1.0 - (lr - intens)
# if we want to update the class with what
# we computed
if keep:
# update the size of the spectrum
self.intens = intens
self.wave = wave
self.measure_spectrum()
#update its parameters
for attr, val in zip(['rv', 'vrot', 'lr', 'korel'], [rv, vrot, lr, korel]):
if val is not None:
setattr(self, attr, val)
self.properties.append(attr)
return
if only_intensity:
return intens
else:
return wave, intens
def get_size(self):
"""
Gets the size of the spectrum i.e. wmin, wmax and step.
output:
props.. dictionary with records 'wmin', 'wmax', 'step'
"""
if self.loaded:
# guarantees fresh result
self.measure_spectrum()
return self.wmin, self.wmax, self.step
else:
raise Exception('Spectrum has not been loaded yet.')
def get_properties(self):
"""
Returns dictionary with the physical properties of the
synthetic spectrum.
Output:
            props.. physical properties of the synthetic spectrum
"""
# return dictionary with the physical properties
props = {key: self[key] for key in self.properties}
return props
def plot(self, ax=None, savefig=False, figname=None, **kwargs):
"""
:param figname
:param savefig
:param ax: AxesSubplot
:param kwargs:
:return:
"""
w = self.wave
i = self.intens
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111)
props = str({prop: self[prop] for prop in self.properties})
ax.plot(w, i, label=props, **kwargs)
ax.set_xlim(self.wmin, self.wmax)
ax.set_ylim(0.95*i.min(), 1.05*i.max())
ax.set_xlabel('$\lambda(\AA)$')
ax.set_ylabel('$F_{\lambda}$(rel.)')
ax.legend(fontsize=10)
# save the figure
if savefig:
if figname is None:
figname = []
for key in self.properties:
# print key, self.properties
figname.extend([key, str(self[key])])
figname.extend(['wmin', str(self.wmin)])
figname.extend(['wmax', str(self.wmax)])
figname = '_'.join(figname) + '.png'
# save the plot
plt.savefig(figname)
def select_interval(self, wmin, wmax):
"""
Selects a spectral interval from the
synthetic spectrum.
:param wmin: minimal wavelength
:param wmax: maximal wavelength
:return wave: wavelength vector
:return intens: intensity vector
"""
# print wmin, wmax, self.wave
ind = np.where((self.wave >= wmin) & (self.wave <= wmax))[0]
# print ind
wave = self.wave[ind]
intens = self.intens[ind]
return wave, intens
def set_linear_wavelength(self, wmin, wmax, step):
"""
In case we want to attach linear wavelengths.
:param wmin
:param wmax
:param step
"""
self.wave = np.arange(wmin, wmax + step / 2., step)
def truncate_spectrum(self, wmin=None, wmax=None):
"""
Truncates the spectrum.
input:
wmin, wmax.. boundaries in wavelength
"""
if self.loaded is False:
raise Exception('The spectrum was not loaded.')
else:
            # if a boundary is missing, use the corresponding edge of the
            # stored spectrum (if both are missing we are just wasting
            # computer time)
            if wmin is None:
                wmin = self.wave.min()
            if wmax is None:
                wmax = self.wave.max()
            # checks that the interval (wmin, wmax) lies within the
            # spectrum; if not, an exception is raised
if (self.wave.min() > wmin) | (self.wave.max() < wmax):
raise ValueError('The spectrum %s does not cover the whole spectral region <%s,%s>.' % \
(str(self).rstrip('\n'), str(wmin), str(wmax)))
            # does the truncation
ind = np.where((self.wave >= wmin) & (self.wave <= wmax))[0]
# ind = np.where(((self.wave - wmin) >= -ZERO_TOLERANCE) & ((self.wave - wmax) <= ZERO_TOLERANCE))[0]
self.wave = self.wave[ind]
self.intens = self.intens[ind]
def write_spectrum(self, filename='synspec.dat', fmt='%12.6f %12.8e', **kwargs):
"""
Writes the current synthetic spectrum.
:return:
"""
header = str(self.get_properties())
np.savetxt(filename, np.column_stack([self.wave, self.intens]), fmt=fmt, header=header)
class SyntheticGrid:
def __init__(self, mode='default', flux_type='relative', debug=False):
"""
Setup the grid.
input:
          mode.. name of the default grid setup (e.g. DEFAULT, OSTAR, BSTAR,
                 POLLUX, AMBRE); 'custom' starts with an empty grid
          flux_type.. 'relative' or 'absolute' synthetic spectra
          debug.. turns on debugging output
"""
# import from defaults
self.default_grid_order = default_grid_order
self.gridDirectory = gridDirectory
self.grid_files = grid_files
self.gridListFile = gridListFile
# Table containing list of the SyntheticSpectrum types
self.SyntheticSpectraList = []
# Table containing all eligible values
self.parameterList = []
self.columns = []
# grid preference order
self.gridOrder = None
# reads default grids
if mode.lower() != 'custom':
self.setup_defaults(mode, flux_type)
# updates debug mode
self.debug = debug
# Initializes array in which the wavelength
# vector is stored
self.wave = None
def __str__(self):
"""
String representation.
"""
string = "List of spectra:\n"
for rec in self.SyntheticSpectraList:
string = string + str(rec)
return string
def check_properties(self, **kwargs):
"""
        Checks that at least some spectra have the required properties.
        (Currently a placeholder - the method body is empty.)
"""
def clear_all(self):
"""
Empties SyntheticSpectraList
"""
self.SyntheticSpectraList = []
def deselect_exact(self, l, **props):
"""
Deletes all cases where we interpolate at exact value.
:param l: list of spectra selected with self.select_parameters
:param props: values at which we interpolate
:return:
"""
l = np.array(l)
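        # if the requested value coincides with a grid node, only the rows
        # at that node are kept, so no interpolation is done in that parameter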
# print np.shape(l)[-1]
keys = props.keys()
# go column by column
for i in range(0, np.shape(l)[-1]):
v = props[keys[i]]
# print v, np.unique(l[:,i])
            # deselects exact matches
if np.any(abs(np.unique(l[:,i]) - v) < ZERO_TOLERANCE):
ind = np.where(abs(l[:,i] - v) < ZERO_TOLERANCE)
l = l[ind]
return l
def get_synthetic_spectrum(self, params, wave, order=2, step=0.01, padding=20.0):
"""
Method which computes the interpolated spectrum
and wraps it within SyntheticSpectrum class. This
function should be accessed by the user.
input:
params.. dictionary containing values at which
we want to interpolate
            order.. number of spectra at which we are going
to interpolate, i.e. the order of the
fit is k = order-1 for order < 4 and
k = 3 for order > 4.
wave.. wavelength vector for which the synthetic
spectrum should be created.
"""
if isinstance(wave, (list, tuple)):
wave = np.array(wave)
# sets up the equidistant wavelength vector
wmin = wave.min() - padding
wmax = wave.max() + padding
# print wmin, wmax, step, order, padding
# overwrite the wave vector for
self.set_wavelength_vector(wmin, wmax, step)
# first of all we need to get list of parameters,
# which the program will interpolate in
parlist, vals, keys = self.select_and_verify_parameters(order=order, **params)
# second creates a list of the spectra used for interpolation
spectra = self.get_spectra_for_interpolation(parlist, keys, step=step,
wmin=wmin, wmax=wmax)
# interpolates the spectra
intens = self.interpolate_spectra(parlist, spectra, vals)
        # wraps the interpolated synthetic spectrum within the SyntheticSpectrum class
spectrum = SyntheticSpectrum(wave=self.wave.copy(), intens=intens, **params)
return spectrum
def get_all(self, **kwargs):
"""
        Returns all spectra whose properties match the passed
        values, i.e. one can list all spectra that have a
        certain value of a certain property.
input:
kwargs.. a dictionary of property = value
"""
# just in case we got empty dictionary
if len(kwargs.keys()) == 0:
return self.SyntheticSpectraList.copy()
# goes through each stored synthetic spectrum
spectra = []
for rec in self.SyntheticSpectraList:
# goes through passed kwargs
for i, key in enumerate(kwargs.keys()):
# print rec[key]
# print kwargs[key]
# keys agrees
if key.lower() in rec.keys():
# values are the same
if (abs(kwargs[key] - rec[key.lower()]) < ZERO_TOLERANCE):
# and we are checking the last record
if i == (len(kwargs.keys()) - 1):
spectra.append(rec)
else:
break
if len(spectra) == 0:
warnings.warn("No eligible spectrum was found! You probably got out of the grid.")
return spectra
def get_available_values(self, prop, **constraints):
"""
Lists all available properties in a sorted array.
input:
prop.. the searched property
grid.. list of SyntheticSpectrum types -
in case we want to search a narrowed
down list
"""
        # lets say we want to constrain the grid
if len(constraints.keys()) == 0:
grid = self.SyntheticSpectraList
else:
grid = self.get_all(**constraints)
# returns all eligible values
values = []
prop = prop.lower()
for rec in grid:
if prop in rec.keys() and rec[prop] not in values:
values.append(rec[prop])
return np.sort(values)
def get_available_values_fast(self, prop, **constraints):
"""
Returns possible values of a parameter.
"""
parLis = np.array(self.parameterList)
# print parLis
for key in constraints.keys():
# value
v = constraints[key]
# constraining column
col = self.columns.index(key)
ind = np.where(abs(parLis[:, col] - v) < ZERO_TOLERANCE)[0]
# narrow down the list
parLis = parLis[ind]
col = self.columns.index(prop.lower())
# return sorted list
return sorted(set(parLis[:, col]))
def get_spectra_for_interpolation(self, parList, header, step=0.01, wmin=None, wmax=None):
"""
Creates a list of spectra - physical spectra - this
will require some more tweaking. I would like to account
for:
1) spectrum is/is not loaded.
            2) spectrum should be somehow truncated
there is no need to load all - maybe it
will even be better to load only small.
This will be controlled through this method
which loads the data. Also some spectra
can be unloaded after several iterations.
input:
output:
"""
# empty list for the spectra
syntheticSpectra = []
# switch for spectra truncation
if (wmin is None) and (wmax is None):
truncateSpectrum = False
else:
truncateSpectrum = True
# go through each row of the parameter list
for i, row in enumerate(parList):
# retrieve spectrum
props = {prop: row[j] for j, prop in enumerate(header)}
spectrum = self.get_all(**props)
# if there are two or more spectra for the
# same temperature
if len(spectrum) > 1:
spectrum = self.resolve_degeneracy(spectrum)
else:
spectrum = spectrum[0]
            # load the spectrum if it has not been loaded yet and check
            # that it fits within the required boundaries
            if (not spectrum.loaded):
                # the same message is printed with or without debugging
                print "Loading spectrum: %s" % (str(spectrum).rstrip('\n'))
# loads the spectrum
spectrum.load_spectrum()
# truncates the loaded spectrum
if truncateSpectrum:
if self.debug:
print "Truncating spectrum to: (%f,%f)" % (wmin, wmax)
spectrum.truncate_spectrum(wmin, wmax)
else:
                # check that the synthetic spectrum has sufficient size;
                # if not, reload it
if not spectrum.check_boundaries(wmin, wmax):
spectrum.load_spectrum()
# truncates the re-loaded spectrum
if truncateSpectrum:
if self.debug:
print "Truncating spectrum to: (%f,%f)" % (wmin, wmax)
spectrum.truncate_spectrum(wmin, wmax)
if self.debug:
print "Spectrum loaded: %s" % (str(spectrum).rstrip('\n'))
            # We have to be sure that all spectra share the same
            # wavelength scale (within one step)
swmin, swmax, sstep = spectrum.get_size()
if np.any(np.abs([swmin - wmin, swmax - wmax, sstep - step]) > ZERO_TOLERANCE):
if self.debug:
print "Spectrum %s does not have the wavelength scale (wmin, wmax,step)=(%s, %s, %s)" % \
(str(spectrum).rstrip('\n'), str(wmin), str(wmax), str(step))
                # if they do not agree, we have to interpolate - it is
                # crucial that all spectra have the same wavelength scale
                if self.wave is None:
wave = np.arange(wmin, wmax + step / 2., step)
else:
wave = self.wave
# interpolate the spectrum to the wavelength scale
intens = spectrum.get_spectrum(wave=wave, only_intensity=True)
else:
if self.debug:
print "Wavelenght scale of spectrum: %s is (wmin, wmax,step)=(%s, %s, %s)." % \
(str(spectrum).rstrip('\n'), str(wmin), str(wmax), str(step))
# read out the intensities
intens = spectrum.get_spectrum(only_intensity=True)
# print len(intens)
# append spectrum to the list
syntheticSpectra.append(intens)
return syntheticSpectra
def interpolate_spectra(self, parList, synspectra, parameters):
"""
Interpolates in all parameters.
input:
parlist.. list generated with select parameters method
synspectra.. list generated with the get_spectra_for_interpolation
method
parameters.. list of parameter values in which we interpolate
the order must be the same as in case of parlist
                this is guaranteed by the output of the select_and_verify_parameters method
output:
intens.. the resulting array of intensities
"""
# convert to arrays, easier to handle
plist = np.array(parList)
syns = np.array(synspectra)
ncol = len(plist[0])
pars = parameters
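        # the interpolation proceeds parameter by parameter, from the last
        # column to the first: in each pass all spectra that share the first
        # ncol-1 parameter values are interpolated in the last one, which
        # removes one column until a single spectrum remains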
while ncol > 0:
# extract new value
xnew = pars[ncol - 1]
# print xnew
new_plist = []
new_syns = []
# take the first row
j = 0
while j < len(plist):
row = plist[j]
# narrow it down - all values
# that have the first ncol-1
# values the same are chosen
t_plist = plist.copy()
t_syns = syns.copy()
for i in range(ncol - 1):
ind = np.where(abs(t_plist[:, i] - row[i]) < ZERO_TOLERANCE)[0]
t_plist = t_plist[ind]
t_syns = t_syns[ind]
# if there is really nothing to interpolate in
# the one value is copied and we proceed to next
# step
if len(t_plist) == 1:
if self.debug:
print "Skipping interpolation in %s - there is only one spectrum for values %s." % \
(str(xnew), str(t_plist[:, :ncol - 1]))
intens = t_syns[0]
new_plist.append(row[:ncol - 1])
new_syns.append(intens)
j += len(ind)
continue
# sort according to the last columns
ind = np.argsort(t_plist[:, ncol - 1])
# extract the abscissa
x = t_plist[ind, ncol - 1]
t_syns = t_syns[ind]
if self.debug:
print "Interpolating in vector: %s at value %s." % (str(x), xnew)
# everything is sorted, we can interpolate
# unless our value is exact ofc.
intens = interpolate_block_faster(x, t_syns, xnew)
# add it to new plists and syns
new_plist.append(row[:ncol - 1])
new_syns.append(intens)
j += len(ind)
syns = np.array(new_syns)
plist = np.array(new_plist)
ncol = len(plist[0])
return syns[0]
    def list_modes(self):
"""
This method lists available modes for the SyntheticGrid.
:return:
"""
        # go over different modes
string = 'List of registered modes and their properties follows:\n'
for i in range(0, len(self.grid_files['identification'])):
string += ''.ljust(100,'=') + '\n'
string += 'mode: %s:\n' % self.grid_files['identification'][i]
string += 'directories: %s \n' % str(self.grid_files['directories'][i])
string += 'columns: %s\n' % str(self.grid_files['columns'][i])
string += 'families: %s\n' % str(self.grid_files['families'][i])
string += ''.ljust(100,'=') + '\n'
return string
def narrow_down_grid(self, **kwargs):
"""
To speed up computations, one can
narrow down the grid, to certain
family, or parameter range.
input:
One can either fix a parameter:
par = value
or fix an interval:
par = (vmin, vmax)
output:
list of synthetic spectra
"""
# separate fixed from free
fixed = {}
free = {}
for key in kwargs.keys():
if not isinstance(kwargs[key], (tuple, list)):
fixed[key] = kwargs[key]
else:
free[key] = kwargs[key]
# first narrow down the fixed ones
grid = self.get_all(**fixed)
# if there are no other restrictions -
# this option is covered with get_all
# method ofc.
if len(free.keys()) == 0:
return grid
else:
narrowed_grid = []
for rec in grid:
for i, key in enumerate(free.keys()):
if key.lower() in rec.keys():
if (rec[key.lower()] >= free[key][0]) & \
(rec[key.lower()] <= free[key][1]):
# all keys must agree
if i == len(free.keys()) - 1:
narrowed_grid.append(rec)
# if we narrowed it down to zero
if len(narrowed_grid) == 0:
warnings.warn("The narrowed-down grid is empty!!")
return narrowed_grid
def read_list_from_file(self, f, columns, directory=None, family=None):
"""
Reads list of grid spectra from a file.
input:
f.. file containing the records
columns.. column description
family.. family is provided
directory.. directory where the files are stored
this option should be used in case
when the path to the spectra = filename
is relative only within the file
"""
# read from text_file
if columns is None:
raise KeyError('Description of the input file was not specified.')
        # There are two mandatory records - 1) path
        # to the synthetic spectrum and 2) family to
        # which the spectrum belongs. By family I mean
        # a published grid. Within one family there should
        # not exist two spectra with the same properties.
        # A missing filename will raise an error, a
        # missing family will raise a warning, because
        # all read spectra will be assigned the same family.
hasFamily = False
if family is None:
for rec in columns:
if rec.upper() == 'FAMILY':
hasFamily = True
addFamily = False
else:
hasFamily = True
addFamily = True
if not hasFamily:
warnings.warn("The family (aka the grid) of spectra was not specified. Assigning family...")
families = self.get_available_values('FAMILY')
family = 'family' + str(len(families))
addFamily = True
        # WE CHECK THAT THERE IS A FILENAME RECORD
        # FOR EACH SPECTRUM - WITHOUT THAT WE WON'T
        # COMPUTE ANYTHING
hasFilename = False
for rec in columns:
if rec.upper() == 'FILENAME':
hasFilename = True
if not hasFilename:
raise KeyError('Record filename = path to the spectrum is missing in the column description!')
lines = read_text_file(f)
# go through file, line by line
for j, line in enumerate(lines):
# store one line = one spectrum info
rec = {}
data = line.split()
# make sure, we have description of all
# properties
if len(data) > len(columns):
raise KeyError('Description of some columns is missing.')
for i, col in enumerate(columns):
# the name should be the only string
if col.upper() in ['FAMILY']:
rec[col.upper()] = data[i]
elif col.upper() in ['FILENAME']:
rec[col.upper()] = os.path.join(directory, data[i])
else:
rec[col.upper()] = float(data[i])
# Adds family if needed
if addFamily:
rec['FAMILY'] = family
filename = rec.pop('FILENAME')
synspec = SyntheticSpectrum(f=filename, do_not_load=True, **rec)
            # Adds the record to the synthetic spectra list
self.SyntheticSpectraList.append(synspec)
# Adds the record to the parameterList - so without family
rec.pop('FAMILY')
            phys_cols = [x.lower() for x in columns if x.upper() not in ['FAMILY', 'FILENAME']]
self.parameterList.append([rec[col.upper()] for col in phys_cols])
# also stores identification of columns
if j == 0:
self.columns = phys_cols
def resolve_degeneracy(self, speclist):
"""
If there are more spectra having the same
        parameters, one has to choose the preferred one.
input:
speclist.. list of SyntheticSpectrum types corresponding to same properties
"""
# if we did not set up the order -> error
if self.gridOrder is None:
raise KeyError('There are same spectra for the same parameters.'
' I think it is because we have more grids, that overlap.'
' You can overcome this by setting gridOrder variable.')
indices = []
for i in range(0, len(speclist)):
# print speclist[i]['family']
indices.append(self.gridOrder.index(speclist[i]['family']))
        # just in case there was something peculiar
        if np.any(np.asarray(indices) < 0):
warnings.warn('At least one grid was not found in the gridOrder variable.'
' Verify that the names set in gridOrder agree with family names of spectra.')
# return spectrum with the smallest index
return speclist[np.argmin(indices)]
def select_and_verify_parameters(self, order=2, **props):
"""
A wrapper to the select_parameters method.
This method can deal with overlaps of grids.
        But since it does not know the grid a priori,
        it is unable to recognize a wrong result.
This wrapper checks the result.
input:
order.. maximal number of interpolated spectra
props.. dictionary of interpolated parameters
output:
parlist.. each row represents one spectrum
which is needed to interpolate in
give props
vals.. values in which we interpolate
            keys.. names of the interpolated parameters
"""
# all parameters are defined lowercase
# so we have to convert it
for key in props.keys():
v = props.pop(key)
props[key.lower()] = v
if self.debug:
print "In select_and_verify_parameters: order=%i properties:" % (order)
print str(props)
# keys and values
keys = props.keys()
vals = [props[key] for key in props.keys()]
# gets the parameter list
# print order, props
parlist = self.select_parameters(order=order,**props)
# print parlist
        # deselect redundant spectra
parlist = self.deselect_exact(parlist, **props)
if len(parlist) == 0:
raise Exception('Do %s lie within the grid? I do not think so...' % (str(props)))
if self.debug:
print 'Following parameters were chosen with select_parameters method:'
for row in parlist:
print row
# checks the result
temp = np.array(parlist)
# print temp, vals
for i, val in enumerate(vals):
# print val, temp[:, i], is_within_interval(val, temp[:, i])
if not is_within_interval(val, temp[:, i]):
raise ValueError('Parameters %s lie outside the grid.' % (str(props)))
return parlist, vals, keys
def select_parameters(self, values=[], order=2, constraints={}, **props):
"""
        Creates the final list of grid nodes - this is still a
        first guess; the search for eligible values and spectra
        could probably be done better.
input:
grid - synthetic spectraList, which is searched
order - how many spectra are used this is
adjusted dynamically if there are not
enough values
constraints - resolve conflicts between grids
props - properties in which we fit
output:
values of spectra for interpolation
"""
# extract the parameter and its values
key = props.keys()[0].lower()
v = props.pop(key)
# print key, constraints, props
# list eligible values for a given parameter
elig_vals = np.array(self.get_available_values_fast(key, **constraints))
# print key, elig_vals
# sorts the grid, from nearest to farthest
ind = np.argsort(abs(elig_vals - v))
vals = elig_vals[ind]
# equality check
# print vals, v, key
# what if the grid step is inhomogeneous? - actually it is
# in z - what shall we do, what shall we do???
if vals[:order].min() > v or vals[:order].max() < v:
# TODO think of something better than this!!!!!!
try:
lower = np.max(vals[np.where(vals - v < ZERO_TOLERANCE)[0]])
upper = np.min(vals[np.where(vals - v > ZERO_TOLERANCE)[0]])
vals = np.array([lower, upper])
            except ValueError:
                # the requested value is not bracketed by the grid
                pass
# print lower, upper, vals
# checks that there is not equality
# if np.any(abs(vals - v) < ZERO_TOLERANCE):
# ind = np.argmin(abs(vals - v))
# vals = [vals[ind]]
#
# if self.debug:
# print "%s=%s is precise. Skipping choice of parameters." % (key, str(v))
# if the eligible values do not surround the parameter
if not is_within_interval(v, vals):
return values
# if there are no other spectra to interpolate in
if len(props.keys()) == 0:
for i in range(0, len(vals)):
row = []
# append those that are already fixed
for key in constraints.keys():
row.append(constraints[key])
# append the last parameter
row.append(vals[i])
# append the row
values.append(row)
# once 'order' spectra are appended, we can
# end
if i == order - 1:
break
return values
else:
j = 0
for i in range(0, len(vals)):
# add a constraint
constraints[key] = vals[i]
# recursively calls the function
values_new = self.select_parameters(values=copy.deepcopy(values), order=order, constraints=constraints,
**props)
# some searches are in vain - so we
# wait until meaningful calls accumulate
if len(values_new) > len(values):
j += 1
                # copies the result, so we can go on
values = values_new
# remove constraint
constraints.pop(key)
if j == order:
break
return values
def setup_defaults(self, mode, flux_type):
"""
Given a key loads a grid stored within
the directory.
input:
          mode.. one of the default modes OSTAR, BSTAR, POLLUX, AMBRE;
                 DEFAULT = all of them
flux_type.. either relative or absolute
"""
        # we do not want to bother with letter case
mode = mode.upper()
flux_type = flux_type.upper()
# select the correct type of flux
# note we cannot overwrite globals, but only class variables
if flux_type == 'ABSOLUTE':
self.grid_files = ABS_grid_files
self.gridDirectory = ABS_gridDirectory
self.gridListFile = ABS_gridListFile
self.default_grid_order = ABS_default_grid_order
# select properties
        if mode not in self.grid_files['identification']:
            raise ValueError('Default settings named %s not found.' % (mode))
        ind = self.grid_files['identification'].index(mode)
dirs = self.grid_files['directories'][ind]
cols = self.grid_files['columns'][ind]
fams = self.grid_files['families'][ind]
# reads the grid files
for i, d in enumerate(dirs):
spectralist = os.path.join(self.gridDirectory, d, self.gridListFile)
directory = os.path.join(self.gridDirectory, d)
self.read_list_from_file(spectralist, cols, family=fams[i], directory=directory)
# also sets the default grid order
self.set_grid_order(self.default_grid_order)
def set_mode(self, mode='default'):
"""
Set different mode.
:param mode:
:return:
"""
debug = self.debug
self.__init__(mode=mode, debug=debug)
def set_grid_order(self, arr):
"""
Sets grid preference.
input:
arr = list of spectra grid1 > grid2 > grid3...
"""
self.gridOrder = arr
def set_wavelength_vector(self, wmin, wmax, step):
"""
Store the wavelength vector within the class.
input:
wmin.. minimal wavelength
wmax.. maximal wavelength
step.. step size in the wavelength
"""
nstep = int((wmax - wmin)/step)+1
self.wave = np.linspace(wmin, wmax, nstep)
| 45,773 | 34.319444 | 119 | py |
pyterpol | pyterpol-master/synthetic/defaults.py | # default settings - for more utility, this was transferred
# to init
import os, inspect
curdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# DEFINITIONS OF GRIDS OF RELATIVE SPECTRA
# ----------------------------------------------------------------------------------------------------------------------
gridDirectory = os.path.join("/".join(curdir.split('/')[:-1]), 'grids')
# name of the file containing records on synthetic spectra
gridListFile = 'gridlist'
grid_files = dict(
identification=['DEFAULT', 'OSTAR', 'BSTAR', 'POLLUX', 'AMBRE'],
directories=[
['OSTAR_Z_0.5', 'OSTAR_Z_1.0', 'OSTAR_Z_2.0', 'BSTAR_Z_0.5', 'BSTAR_Z_1.0', 'BSTAR_Z_2.0', 'POLLUX_Z_1.0',
'AMBRE_Z_1.0'],
['OSTAR_Z_0.5', 'OSTAR_Z_1.0', 'OSTAR_Z_2.0'],
['BSTAR_Z_0.5', 'BSTAR_Z_1.0', 'BSTAR_Z_2.0'],
['POLLUX_Z_1.0'],
['AMBRE_Z_1.0']
],
columns=[['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z']
],
families=[['OSTAR', 'OSTAR', 'OSTAR', 'BSTAR', 'BSTAR', 'BSTAR', 'POLLUX', 'AMBRE'],
['OSTAR', 'OSTAR', 'OSTAR'],
['BSTAR', 'BSTAR', 'BSTAR'],
['POLLUX'],
['AMBRE']
]
)
# stores default grid order
default_grid_order = ['BSTAR', 'OSTAR', 'AMBRE', 'POLLUX']
# DEFINITIONS OF GRIDS OF ABSOLUTE SPECTRA
# ----------------------------------------------------------------------------------------------------------------------
ABS_gridDirectory = os.path.join("/".join(curdir.split('/')[:-1]), 'grids_ABS')
# name of the file containing records on synthetic spectra
ABS_gridListFile = 'gridlist'
# POLLUX has a too narrow wavelength range => it was deleted
ABS_grid_files = dict(
identification=['DEFAULT', 'PHOENIX', 'BSTAR'],
directories=[
['OSTAR_Z_1.0', 'BSTAR_Z_1.0', 'POLLUX_Z_1.0'],
['BSTAR_Z_1.0'],
['PHOENIX_Z_1.0'],
],
columns=[
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
['FILENAME', 'TEFF', 'LOGG', 'Z'],
],
families=[
['OSTAR', 'BSTAR', 'POLLUX'],
['BSTAR'],
['PHOENIX'],
]
)
# stores default grid order
ABS_default_grid_order = ['BSTAR', 'OSTAR', 'POLLUX', 'PHOENIX']
| 2,466 | 34.242857 | 120 | py |
pyterpol | pyterpol-master/synthetic/__init__.py | 2 | 0 | 0 | py |
|
pyterpol | pyterpol-master/plotting/plotting.py | import copy
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
import numpy as np
from scipy.stats import norm
from pyterpol.synthetic.auxiliary import read_text_file
def get_walker(db, nchain, nwalker, niter):
"""
Retrieves a walker from the chain.
    :param db: array with the flattened chain (nwalker*niter rows)
    :param nchain: index of the walker to extract
    :param nwalker: total number of walkers
    :param niter: number of iterations per walker
    :return: the rows of the chain belonging to the selected walker
"""
rows = np.arange(niter)
rows = nchain + nwalker*rows
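    # the flattened chain is walker-interleaved: iteration i of walker
    # nchain is stored at row nchain + nwalker*i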
return db[rows]
def plot_walkers_for_one_param(db, ipar, nwalker, niter, ax):
"""
    :param db: array with the flattened chain
    :param ipar: column index of the plotted parameter
    :param nwalker: total number of walkers
    :param niter: number of iterations per walker
    :param ax: matplotlib axis to plot into
:return:
"""
    # set the iterations
iters = np.arange(niter)
# plot each walker
for i in range(0, nwalker):
w = get_walker(db, i, nwalker, niter)
ax.plot(iters, w[:,ipar], '-')
def plot_walkers(block, niter, nwalker, indices=None, labels=None, savefig=True, figname=None):
"""
    :param block: array with the flattened chain
    :param indices: indices (columns) of the parameters to plot
    :param niter: number of iterations per walker
    :param nwalker: total number of walkers
    :param labels: labels of the plotted parameters
    :param savefig: save the figure?
    :param figname: name of the output figure
:return:
"""
if figname is not None:
savefig = True
# define which parameters are plotted
if indices is None:
indices = np.arange(len(block[0]))
npar = len(indices)
    # define the plotting grid
ncol = 3
nrow = npar / ncol
if npar % ncol > 0:
nrow += 1
# create the grid and the figure
gs1 = gs.GridSpec(nrow, ncol, hspace=0.2, wspace=0.4)
fig = plt.figure(figsize=(4*ncol, 3*nrow), dpi=100)
# plot each figure
for j, ind in enumerate(indices):
# set label
if labels is None:
label = 'p' + str(ind).zfill(2)
else:
label = labels[j]
# set the position
icol = j % ncol
irow = j / ncol
ax = fig.add_subplot(gs1[irow, icol])
# plot the walkers
plot_walkers_for_one_param(block, ind, nwalker, niter, ax)
ax.set_xlabel('Iteration number', fontsize=8)
ax.set_ylabel(label, fontsize=8)
# save the figure
if savefig:
if figname is None:
figname = 'mcmc_convergence.png'
# plt.tight_layout()
plt.savefig(figname)
def plot_convergence(block, labels=None, relative=True, savefig=True, figname=None):
"""
Plots convergence of the chi^2 and of individual parameters.
:param block:
:param labels:
:param relative:
:param savefig
:param figname
:return:
"""
nrow, ncol = np.shape(block)
# normalize with the best value
    # if relative is passed
if relative:
rel_block = copy.deepcopy(block)
for i in range(0, ncol):
rel_block[:,i] = block[:, i]/block[-1, i]
# start a new figure
fig = plt.figure(dpi=100, figsize=(15, 10))
ax = fig.add_subplot(111)
# plot convergence
for i in range(0, ncol):
# define the color
color = 0.1 +0.9*np.random.random(3)
if labels is not None:
ax.plot(rel_block[:,i], '-', color=color, label=labels[i])
else:
ax.plot(rel_block[:,i], '-', color=color)
ax.set_xlabel('Iteration number')
ax.set_ylabel('Relative value.')
ax.legend(loc=1, fontsize=10)
# save the plot
if savefig == True:
if figname is None:
figname = 'convergence.png'
plt.savefig(figname)
# try to produce another kind of plot
if ncol % 2 > 0:
nfigrow = ncol / 2 + 1
else:
nfigrow = ncol / 2
# setup the grid
gs1 = gs.GridSpec(nfigrow, 2, hspace=0.5)
# setup the figure
fig2 = plt.figure(dpi=100, figsize=(10, 3*nfigrow))
# plot convergence of individual parameters
for i in range(0, ncol):
ax = fig2.add_subplot(gs1[i/2, i%2])
ax.set_xlabel('Iteration number')
ax.set_ylabel('Value')
ax.set_ylabel(labels[i], fontsize=8)
ax.plot(block[:, i], 'k-', label=labels[i])
# ax.legend(loc=1)
# save the figure
fig2.savefig('convergence_2.png')
plt.close()
def plot_chi2_map(x, y, nbin=10, labels=None, savefig=True, figname=None):
"""
Plots a covariance map.
:param x parameter values
:param y parameter values
:param nbin number of bins in a histogram
:param labels
:param savefig
:param figname
:return:
"""
fs=8
# if user did not pass the labels
    if labels is None:
labels = ['x', 'y']
# set up the figure
fig = plt.figure(figsize=(10,10), dpi=100)
var_axes = [221, 224]
var_data = [x, y]
    # first the plot of the variance
for i in range(0, 2):
ax = fig.add_subplot(var_axes[i])
# plot the histogram
        n, bins, patches = ax.hist(var_data[i], nbin, normed=True, label=labels[i])
x_g = np.linspace(bins.min(), bins.max(), 50)
# plot the gaussian 'fit'
mean = var_data[i].mean()
var = var_data[i].std(ddof=1)
g = norm(loc=mean, scale=var)
ax.plot(x_g, g.pdf(x_g), 'r-')
# labeling
ax.set_xlabel(labels[i], fontsize=8)
ax.set_ylabel('$n_i/N$', fontsize=8)
ax.set_title(r'$\sigma$_%s=%.3f' % (labels[i], var), fontsize=8)
# plot the 2d chi2 map
ax = fig.add_subplot(223)
ax.hist2d(x, y, nbin, normed=True)
# compute the correlation
cov = ((x-x.mean())*(y-y.mean())).mean()
cor = cov/(x.std(ddof=1)*y.std(ddof=1))
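    # i.e. the Pearson correlation coefficient of the two parameter chains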
# labelling
ax.set_xlabel(labels[0], fontsize=8)
ax.set_ylabel(labels[1], fontsize=8)
ax.set_title(r'$\rho$(%s, %s) = %.3f' % (labels[0], labels[1], cor), fontsize=8)
# save the figure
if savefig:
if figname is None:
figname = '_'.join(labels) + '.png'
plt.savefig(figname)
plt.close()
def plot_variance(x, nbin=10, label=None, savefig=True, figname=None):
"""
Plots a covariance map.
:param x parameter values
:param nbin number of bins in a histogram
:param labels
:param savefig
:param figname
:return:
"""
fs=8
# if user did not pass the labels
if label is None:
label = 'x'
# set up the figure
fig = plt.figure(figsize=(6,6), dpi=100)
    # first the plot of the variance
ax = fig.add_subplot(111)
# plot the histogram
n, bins, patches = ax.hist(x, nbin, normed=True, label=label)
x_g = np.linspace(bins.min(), bins.max(), 50)
# plot the gaussian 'fit'
mean = x.mean()
var = x.std(ddof=1)
g = norm(loc=mean, scale=var)
ax.plot(x_g, g.pdf(x_g), 'r-')
# labeling
ax.set_xlabel(label, fontsize=8)
ax.set_ylabel('$n_i/N$', fontsize=8)
ax.set_title(r'$\sigma$_%s=%.3f' % (label, var), fontsize=8)
ax.legend(fontsize=8)
# save the figure
if savefig:
if figname is None:
figname = label + '.png'
else:
figname += label+'.png'
plt.savefig(figname)
plt.close()
def read_fitlog(f):
"""
Reads the fitting log and stores it within a dictionary.
:param f:
:return:
"""
# read the file
lines = read_text_file(f)
    # key counter and output dictionary
fitlog = {}
hkcounter = 0
# define header keys
head_keys = ['name', 'component', 'group']
for l in lines:
d = l.split()
# print d
for hk in head_keys:
if l.find(hk) > -1:
# groups are integers of course
if hk == 'group':
d[2:] = map(int, d[2:])
else:
d[2:] = d[2:]
# append the header info
fitlog[hk] = d[2:]
hkcounter += 1
break
# once we read all data, we end
if hkcounter == 3:
break
# print fitlog
# append data
fitlog['data'] = np.loadtxt(f)
return fitlog
def read_mc_chain(f):
"""
Reads the mcmc chain created with emcee
:param f: chain_file
:return:
"""
# read the file
lines = read_text_file(f)
    # key counter and output dictionary
chainlog = {}
hkcounter = 0
# define header keys
head_keys = ['name', 'component', 'group']
for l in lines:
d = l.split()
# print d
for hk in head_keys:
if l.find(hk) > -1:
# groups are integers of course
if hk == 'group':
d[2:] = map(int, d[2:])
else:
d[2:] = d[2:]
# append the header info
chainlog[hk] = d[2:]
hkcounter += 1
break
# once we read all data, we end
if hkcounter == 3:
break
# load the file
d = np.loadtxt(f)
# get fit properties
nwalkers = int(np.max(d[:, 0])) + 1
niter = len(d[:, 0]) / nwalkers
npars = len(d[0]) - 2
# remove the first column with numbering
chainlog['data'] = d[:, 1:]
return chainlog, nwalkers, niter, npars
| 9,041 | 22.42487 | 95 | py |
pyterpol | pyterpol-master/plotting/__init__.py | 0 | 0 | 0 | py |
|
pyterpol | pyterpol-master/pyterpol_examples/Interface/output/example.py | """
This tutorial serves as demonstration of how to fit observed
spectra with Pyterpol.
Our observed spectra were created with the old C++ version of the
code. We have three spectra of a binary consisting of
primary: teff = 25000, g = 4.2, vrot = 150, lr = 0.7, z = 1.0
secondary: teff = 18000, g = 4.2, vrot = 50, lr = 0.3, z = 1.0
and various radial velocities. They look as if they were
observed spectra.
We have successfully fitted the data with the differential
evolution algorithm from the SciPy library. Our next step is
to get the output from fitting.
"""
import pyterpol
# First load the session
itf = pyterpol.Interface.load('fitted.itf')
# check that everything has loaded correctly
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 49.9857247022 vmin: -120.0 vmax: 120.0 fitted: True group: 1 _typedef: None
name: rv value: 19.9864936135 vmin: -120.0 vmax: 120.0 fitted: True group: 2 _typedef: None
name: rv value: 100.009478284 vmin: -120.0 vmax: 120.0 fitted: True group: 3 _typedef: None
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
Component: secondary
name: rv value: -49.9460982465 vmin: -120.0 vmax: 120.0 fitted: True group: 1 _typedef: None
name: rv value: -19.9589330606 vmin: -120.0 vmax: 120.0 fitted: True group: 2 _typedef: None
name: rv value: -99.9753261321 vmin: -120.0 vmax: 120.0 fitted: True group: 3 _typedef: None
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330.0, 6375.0):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500.0, 6600.0):
component: all groups: {'lr': 0}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 1} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 2} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 3} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: sp_diff_evol optional_arguments: {}
Initial parameters:
====================================================================================================
"""
# write a dictionary of parameters and their errors
itf.write_fitted_parameters(outputname='result.dat')
"""
c: primary p: rv g: 1 value: 49.9857 lower: -0.1070 upper: 0.1117
c: primary p: rv g: 2 value: 19.9865 lower: -0.1184 upper: 0.0943
c: primary p: rv g: 3 value: 100.0095 lower: -0.0921 upper: 0.1048
c: secondary p: rv g: 1 value: -49.9461 lower: -0.0866 upper: 0.1056
c: secondary p: rv g: 2 value: -19.9589 lower: -0.1161 upper: 0.0974
c: secondary p: rv g: 3 value: -99.9753 lower: -0.0940 upper: 0.1116
"""
# first we would like to see what our comparisons look like;
# naming the figures using 'figname' is not mandatory, but
# it is advised.
itf.plot_all_comparisons()
# we may want to export the synthetic spectra
# we can write one component in one region -
# this will export a synthetic spectrum for each
# rv_group
itf.write_synthetic_spectra(component='primary', region='region00', outputname='primary')
# or we can write everything together
itf.write_synthetic_spectra()
# convergence can be plotted - by default chi^2.
itf.plot_convergence(figname='convergence_chi.png')
# interface plots the data from the fit log, so it is
# better to save it - also even if our model/fitlog changed
# we can still plot the convergence, stored within a fitlog
itf.plot_convergence(parameter='all', f='fit.log', figname='convergence_parameters.png')
# and we can also plot covariance, which will tell us
# what is the uncertainty of the fit - we are interested in rv
# This will plot covariances between rvs for group 1s
itf.plot_covariances(parameters=['rv'], groups=[1], figname='rv_g_1')
# Again, it is not necessary to use the registered fitlog
itf.plot_covariances(f='fit.log', parameters=['rv'], groups=[2], figname='rv_g_2')
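# for lower-level access one can also parse the fit log directly with the
# helpers from the plotting module; this is only a sketch and assumes the
# module is importable as pyterpol.plotting.plotting
from pyterpol.plotting.plotting import read_fitlog
fitlog = read_fitlog('fit.log')
print fitlog['name'], fitlog['component'], fitlog['group']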
| 5,219 | 47.333333 | 146 | py |
pyterpol | pyterpol-master/pyterpol_examples/Interface/setup/example.py | """
This tutorial serves as demonstration of how to set up an Interface.
Our observed spectra were created with the old C++ version of the
code. We have three spectra of a binary consisting of
primary: teff = 25000, g = 4.2, vrot = 150, lr = 0.7, z = 1.0
secondary: teff = 18000, g = 4.2, vrot = 50, lr = 0.3, z = 1.0
and various radial velocities. They look as if they were
observed spectra.
We will take advantage of the default behavior.
"""
import pyterpol
# 1) First we create a starlist
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=25000., logg=4.2, vrot=150., lr=0.7, z=1.0, rv=0.0)
sl.add_component(component='secondary', teff=18000., logg=4.2, vrot=50., lr=0.3, z=1.0, rv=0.0)
# 2) Now think of regions where we might want to do
# the comparison
rl = pyterpol.RegionList()
# the silicon lines
rl.add_region(wmin=6330, wmax=6375)
# Halpha
rl.add_region(wmin=6500, wmax=6600)
# 3) Now attach the data
ol = pyterpol.ObservedList()
obs = [
dict(filename='a'),
dict(filename='b'),
dict(filename='c'),
]
ol.add_observations(obs)
# 4) create the interface
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
itf.setup()
# review the class - this is a nice example of how the
# default groups are assigned. Both components have now
# six rvs and two lrs. The explanation is simple - we have
# three observed spectra and two regions. There is one
# relative luminosity for each region and one radial
# velocity for each spectrum and each region
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 4 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 5 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 6 _typedef: <type 'float'>
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: <type 'float'>
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: <type 'float'>
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 1 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
Component: secondary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 4 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 5 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 6 _typedef: <type 'float'>
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: <type 'float'>
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: <type 'float'>
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 1 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330, 6375):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500, 6600):
component: all groups: {'lr': 1}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: False global_error: None group: {'rv': [1, 4]} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: False global_error: None group: {'rv': [2, 5]} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: False global_error: None group: {'rv': [3, 6]} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: None optional_arguments: {}
Initial parameters:
====================================================================================================
"""
# since our 'observed spectra' are just model spectra,
# the radial velocity and relative luminosity are the
# same within each spectrum, so we might set that the
# relative luminosity is the same for each region
# and the radial velocity is the same for each spectrum.
# we have groups for the task - clear the ObservedList
# and RegionList
rl.clear_all()
ol.clear_all()
# add the regions again and set a group in relative luminosity
# for both
rl.add_region(wmin=6330, wmax=6375, groups=dict(lr=0))
rl.add_region(wmin=6500, wmax=6600, groups=dict(lr=0))
# set a radial velocity group for each spectrum
# and add some errors, so we do not have to listen to
# the errors all the time
obs = [
dict(filename='a', group=dict(rv=1), error=0.001),
dict(filename='b', group=dict(rv=2), error=0.001),
dict(filename='c', group=dict(rv=3), error=0.001),
]
ol.add_observations(obs)
# create the Interface again
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
itf.setup()
# review - it - we can now see that there is only
# one relative luminosity and three radial velocities
# for each component.
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: <type 'float'>
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: <type 'float'>
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: <type 'float'>
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
Component: secondary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: <type 'float'>
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: <type 'float'>
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: <type 'float'>
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: <type 'float'>
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: <type 'float'>
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: <type 'float'>
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: <type 'float'>
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330, 6375):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500, 6600):
component: all groups: {'lr': 0}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 1} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 2} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 3} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: None optional_arguments: {}
Initial parameters:
====================================================================================================
"""
# lets save the class - it will create a text file, from
# which the interface can be easily loaded
itf.save('setup.itf')
# and have a look at what the comparisons look like -
# figname serves only as a prefix in this case.
itf.plot_all_comparisons(figname='initial')
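# as a quick round-trip check (just a sketch), the saved session can be
# restored with Interface.load - the fitting tutorial starts from this file
itf = pyterpol.Interface.load('setup.itf')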
| 9,099 | 51.601156 | 151 | py |
pyterpol | pyterpol-master/pyterpol_examples/Interface/fit/example.py | """
This tutorial serves as demonstration of how to fit observed
spectra with Pyterpol.
Our observed spectra were created with the old C++ version of the
code. We have three spectra of a binary consisting of
primary: teff = 25000, g = 4.2, vrot = 150, lr = 0.7, z = 1.0
secondary: teff = 18000, g = 4.2, vrot = 50, lr = 0.3, z = 1.0
and various radial velocities. They look as if they were
observed spectra.
Now we will pick up the interface where we ended and
fit the data.
"""
import pyterpol
import numpy as np
# Create the fitting environment and load the last
# session.
itf = pyterpol.Interface.load('setup.itf')
# review the loaded interface - if we compare it with the
# previous example, we see that everything loaded as it
# should
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: None
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: None
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: None
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
Component: secondary
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 1 _typedef: None
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 2 _typedef: None
name: rv value: 0.0 vmin: -1000.0 vmax: 1000.0 fitted: False group: 3 _typedef: None
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330.0, 6375.0):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500.0, 6600.0):
component: all groups: {'lr': 0}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 1} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 2} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 3} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: None optional_arguments: {}
Initial parameters:
====================================================================================================
"""
# the second step is to set what will be fitted. Do not forget to
# set the boundaries vmin, vmax too.
# we can do it parameter by parameter
itf.set_parameter(group=1, component='primary', parname='rv', fitted=True)
# or we can set all at once
itf.set_parameter(group=1, component='primary', parname='rv', fitted=True, vmin=-120., vmax=120.)
# or set everything for primary
itf.set_parameter(component='primary', parname='rv', fitted=True, vmin=-120., vmax=120.)
# or set everything for every rv in the StarList
itf.set_parameter(parname='rv', fitted=True, vmin=-120., vmax=120.)
# now let's set the fitter - one does not even have to construct the class,
# it is sufficient to choose the fitter - let's take the Nelder-Mead simplex,
# preferably the nlopt version ('nlopt_nelder_mead'), because it uses boundaries
# for simplex it is good to set the initial uncertainty
init_step = 50*np.ones(6)
itf.choose_fitter('nlopt_nelder_mead', init_step=init_step, ftol=1e-6)
# lets review the whole session
print itf
"""
==============================================StarList==============================================
Component: primary
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 1 _typedef: None
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 2 _typedef: None
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 3 _typedef: None
name: teff value: 25000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 150.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.7 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
Component: secondary
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 1 _typedef: None
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 2 _typedef: None
name: rv value: 0.0 vmin: -120.0 vmax: 120.0 fitted: True group: 3 _typedef: None
name: teff value: 18000.0 vmin: 6000.0 vmax: 50000.0 fitted: False group: 0 _typedef: None
name: vrot value: 50.0 vmin: 0.0 vmax: 500.0 fitted: False group: 0 _typedef: None
name: logg value: 4.2 vmin: 0.0 vmax: 5.0 fitted: False group: 0 _typedef: None
name: lr value: 0.3 vmin: 0.0 vmax: 1.0 fitted: False group: 0 _typedef: None
name: z value: 1.0 vmin: 0.0 vmax: 2.0 fitted: False group: 0 _typedef: None
=============================================RegionList=============================================
Region name: region00: (wmin, wmax) = (6330.0, 6375.0):
component: all groups: {'lr': 0}
Region name: region01: (wmin, wmax) = (6500.0, 6600.0):
component: all groups: {'lr': 0}
============================================ObservedList============================================
List of all attached spectra:
filename: a component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 1} (min, max): (6250.0, 6799.9499999999998)
filename: b component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 2} (min, max): (6250.0, 6799.9499999999998)
filename: c component: all korel: False loaded: True hasErrors: True global_error: 0.001 group: {'rv': 3} (min, max): (6250.0, 6799.9499999999998)
===============================================Fitter===============================================
Fitter: nlopt_nelder_mead optional_arguments: {}
Initial parameters:(rv, g.): (0.0, 1); (rv, g.): (0.0, 2); (rv, g.): (0.0, 3); (rv, g.): (0.0, 1); (rv, g.): (0.0, 2);
(rv, g.): (0.0, 3);
====================================================================================================
"""
# check the initial chi-square - first we have to get the fitted parameters
# and convert list of Parameters -> list of floats
init_pars = [par['value'] for par in itf.get_fitted_parameters()]
# or we can let the function do it for us
init_pars = itf.get_fitted_parameters(attribute='value')
# and now evaluate the chi-square for these initial parameters
init_chi2 = itf.compute_chi2(init_pars)
print "Initial chi-square: %f" % init_chi2
"""
Initial chi-square: 950375.454308
"""
# finally run the fitting
itf.run_fit()
# check the final chi-square
final_pars = itf.get_fitted_parameters(attribute='value')
final_chi2 = itf.compute_chi2(final_pars)
print "Final chi-square (nlopt_nelder_mead): %f" % final_chi2
"""
Final chi-square (nlopt_nelder_mead): 144433.598816
"""
# and plot everything
itf.plot_all_comparisons(figname='final_nm')
# It is not surprising that the fit failed - why?
# for radial velocities one is in general far from the
# global minimum - the variation can be high, so
# it is better to get the first estimate with a global method
# like differential evolution
itf.choose_fitter('sp_diff_evol')
itf.run_fit()
# check the final chi-square
final_pars = itf.get_fitted_parameters(attribute='value')
final_chi2 = itf.compute_chi2(final_pars)
print "Final chi-square: %f (sp_diff_evol)" % final_chi2
"""
Final chi-square: 73.231889 (sp_diff_evol)
"""
# let's see the difference
itf.plot_all_comparisons(figname='final_de')
# The message here is that before one really tries
# to fit radiative properties, it is better to do the
# fitting of the RVs first. Since we are not using
# any previous information on the RVs (the orbital
# solution is not attached), it is better to
# use a global method - especially for a large parameter space
itf.save('fitted.itf')
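# --- Illustrative sketch (added, hedged): the "RVs first" strategy noted above,
# written out as a two-stage fit. It reuses only calls demonstrated in this
# script; the figure and session names are hypothetical.
# stage 1: global search over the radial velocities with differential evolution
itf.set_parameter(parname='rv', fitted=True, vmin=-120., vmax=120.)
itf.choose_fitter('sp_diff_evol')
itf.run_fit()
# stage 2: local refinement of the same parameters with Nelder-Mead
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
itf.run_fit()
itf.plot_all_comparisons(figname='final_two_stage')
itf.save('fitted_two_stage.itf')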
| 8,806 | 45.845745 | 146 | py |
pyterpol | pyterpol-master/pyterpol_examples/SyntheticSpectrum/example.py | """
This is a tutorial script showing how to handle the class SyntheticSpectrum.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
# Load the spectrum using the library numpy
wave, intens = np.loadtxt('grid.dat', unpack=True, usecols=[0,1])
# The synthetic spectrum can be created either from arrays
ss = pyterpol.SyntheticSpectrum(wave=wave, intens=intens)
# or loaded directly from a file
ss = pyterpol.SyntheticSpectrum(f='grid.dat')
# usually your spectra have additional properties.
# Note that parameters passed during the construction
# of the class have no impact on the underlying spectrum
ss = pyterpol.SyntheticSpectrum(f='grid.dat', teff=18000, logg=4.5, idiotic_parameter='pes')
# which you can then easily review
print ss
# or change
ss['teff'] = 12000
# and review the changes
print ss
# Or we can directly view the spectrum
ss.plot(savefig=True, figname='original.png')
# the spectrum can be rotated
newwave, newintens = ss.get_spectrum(vrot=50)
# shifted in rv
newwave, newintens = ss.get_spectrum(rv=50)
# shrunk
newwave, newintens = ss.get_spectrum(lr=0.7)
# or transformed to KOREL
newwave, newintens = ss.get_spectrum(korel=True)
# or all together
newwave, newintens = ss.get_spectrum(vrot=50, lr=0.7, rv=50, korel=True)
# let's wrap this into a SyntheticSpectrum and plot it
nss = pyterpol.SyntheticSpectrum(wave=newwave, intens=newintens)
nss.plot(savefig=True, figname='adjusted.png')
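# --- Illustrative sketch (added, hedged): overplot the original and the adjusted
# spectrum in one figure; this assumes plot() accepts a matplotlib axis via ax=,
# as it does for the spectra returned by SyntheticGrid in the other examples.
fig = plt.figure()
ax = fig.add_subplot(111)
ss.plot(ax=ax)
nss.plot(ax=ax)
plt.savefig('original_vs_adjusted.png')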
plt.show()
| 1,449 | 25.851852 | 92 | py |
pyterpol | pyterpol-master/pyterpol_examples/SyntheticGrid/example.py | """
This script serves as a demonstration of the class SyntheticGrid.
"""
# import the library
import pyterpol
import matplotlib.pyplot as plt
# The handling of the synthetic grid is hidden from the user,
# therefore the user's interaction with the grid is
# restricted to only a few methods.
# How to create a grid? Oops - we have forgotten which modes are available,
# so we create a default grid and have a look at the grids that are available.
# In general the user should use the default grid, because it spans all
# implemented grids
sg = pyterpol.SyntheticGrid()
# The method list_modes returns a string, so it has to be printed
print sg.list_modes()
# Now we know the modes, so we can either create the grid again
sg = pyterpol.SyntheticGrid(mode='bstar')
# or just set mode for the existing one - BSTAR will be our
# exemplary grid.
sg.set_mode(mode='bstar')
# we have set up a grid, so we can interpolate.
# synthetic spectra should be queried with
# the method get_synthetic_spectrum - let's do some calls
# grid parameters have to be wrapped in the
# following dictionary
pars = dict(teff=18200, logg=4.3, z=1.2)
# We should also pass some boundaries, unless we want
# to get the whole wavelength range of the grid
spectrum1 = sg.get_synthetic_spectrum(pars, [4300, 4400])
# we can view properties of the synthetic spectrum
print spectrum1
# we can of course plot it
spectrum1.plot(savefig=True, figname='spectrum1.png')
# it is a SyntheticSpectrum instance, so it has all of its
# features; if we want the synthetic spectrum to adopt the
# new (adjusted) spectrum, we say so with the keyword 'keep'
spectrum1.get_spectrum(vrot=30., rv=-200., lr=0.3, wmin=4300, wmax=4400, keep=True)
spectrum1.plot(savefig=True, figname='spectrum1_adjusted.png')
# A great feature of the class is that it remembers all
# loaded spectra until the program ends. This means that
# if your next interpolation requires similar spectra
# from the grid, everything will be much faster
pars = dict(teff=18300, logg=4.2, z=1.1)
spectrum1= sg.get_synthetic_spectrum(pars, [4300, 4400])
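# --- Illustrative sketch (added, hedged): time a further, similar query to see
# the effect of the in-memory caching described above; the parameter values are
# arbitrary points inside the BSTAR grid and the timing is indicative only.
import time
t0 = time.time()
sg.get_synthetic_spectrum(dict(teff=18350, logg=4.25, z=1.15), [4300, 4400])
print 'similar query served from cached spectra in %.2f s' % (time.time() - t0)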
# The user can also change the resolution of the grid
# by setting the keyword step, and the number of
# spectra that are used for the interpolation by setting
# the keyword order
# step = wavelength step
# order = maximal number of spectra that should be used for
# the interpolation
pars = dict(teff=29300, logg=3.1, z=0.74)
spectrum2 = sg.get_synthetic_spectrum(pars, [4300, 4400], order=4, step=0.05)
# plot a comparison of the two spectra
fig = plt.figure()
ax = fig.add_subplot(111)
spectrum1.plot(ax=ax)
spectrum2.plot(ax=ax)
plt.savefig('comparison.png')
| 2,610 | 31.6375 | 83 | py |
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas/v746cas_2.py | """
V746Cas - fitting of observed spectra.
This example also shows how we can proceed if
we want to fit parameters step by step.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
def inspect_spectra(f):
ifile = open(f, 'r')
slist = ifile.readlines()
ifile.close()
for rec in slist:
ifile = open(rec.rstrip('\n'), 'r')
ifile.readline()
x, y = np.loadtxt(ifile, unpack=True, usecols=[0,1])
plt.plot(x, y, '-')
plt.show()
# Setting up the interface is something that should be kept
# separate from the fitting, because consecutive fits
# change the initial settings.
def setup_interface_single_obs():
# have a look at one observation
ol = pyterpol.ObservedList()
obs = pyterpol.ObservedSpectrum(filename='v7c00001.asc', group=dict(rv=0), error=0.01)
    # two methods of estimating the error
print obs.get_sigma_from_continuum(cmin=6665, cmax=6670)
print obs.get_sigma_from_fft()
ol.add_observations([obs])
# define a starlist
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=17000., logg=4.0, vrot=180., z=1.0, lr=1.0)
# define regions
rl = pyterpol.RegionList()
rl.add_region(wmin=6340, wmax=6410, groups=dict(lr=0))
rl.add_region(wmin=6520, wmax=6610, groups=dict(lr=0))
rl.add_region(wmin=6665, wmax=6690, groups=dict(lr=0))
# create interfaces
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
# set fit order = 2 to do it fast
itf.set_grid_properties(order=2)
itf.setup()
# review the result - one rv group, one lr group
print itf
# plot comparison
itf.plot_all_comparisons(figname='teff17000')
    # try different temperatures - this way we can easily review
    # several comparisons
itf.set_parameter(parname='teff', value=25000.)
itf.populate_comparisons()
itf.plot_all_comparisons(figname='teff25000')
itf.set_parameter(parname='teff', value=13000.)
itf.populate_comparisons()
itf.plot_all_comparisons(figname='teff13000')
itf.save('initial.itf')
# if we want to fit interactively, parameter by parameter
# it is easier to use the save/load mechanism
# itf = pyterpol.Interface.load('tefffit.itf')
# itf = pyterpol.Interface.load('vrotfit.itf')
itf = pyterpol.Interface.load('loggfit.itf')
# choose a fitter
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
# change another parameter
# itf.set_parameter(parname='vrot', vmin=120., vmax=200., fitted=True)
# itf.set_parameter(parname='logg', vmin=3.5, vmax=4.5, fitted=True)
itf.set_parameter(parname='z', vmin=0.5, vmax=2.0, fitted=True)
itf.set_parameter(parname='teff', vmin=15000., fitted=True)
itf.run_fit()
# get the result
# itf.plot_all_comparisons(figname='vrotfit')
itf.plot_all_comparisons(figname='zfit')
# itf.write_fitted_parameters(outputname='iter03.res')
itf.write_fitted_parameters(outputname='iter05.res')
# save a new Interface
# itf.save('vrotfit.itf')
itf.save('zfit.itf')
| 2,974 | 28.455446 | 90 | py |
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas/v746cas.py | """
V746Cas - fitting of observed spectra.
This example also shows how we can proceed if
we want to fit parameters step by step.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
def inspect_spectra(f):
ifile = open(f, 'r')
slist = ifile.readlines()
ifile.close()
for rec in slist:
ifile = open(rec.rstrip('\n'), 'r')
ifile.readline()
x, y = np.loadtxt(ifile, unpack=True, usecols=[0,1])
plt.plot(x, y, '-')
plt.show()
# Setting up the interface is something that should be kept
# separate from the fitting, because consecutive fits
# change the initial settings.
def setup_interface_single_obs():
# have a look at one observation
ol = pyterpol.ObservedList()
obs = pyterpol.ObservedSpectrum(filename='v7c00001.asc', group=dict(rv=0), error=0.01)
    # two methods of estimating the error
print obs.get_sigma_from_continuum(cmin=6665, cmax=6670)
print obs.get_sigma_from_fft()
ol.add_observations([obs])
# define a starlist
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=17000., logg=4.0, vrot=180., z=1.0, lr=1.0)
# define regions
rl = pyterpol.RegionList()
rl.add_region(wmin=6340, wmax=6410, groups=dict(lr=0))
rl.add_region(wmin=6520, wmax=6610, groups=dict(lr=0))
rl.add_region(wmin=6665, wmax=6690, groups=dict(lr=0))
# create interfaces
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
# set fit order = 2 to do it fast
itf.set_grid_properties(order=2)
itf.setup()
# review the result - one rv group, one lr group
print itf
# plot comparison
itf.plot_all_comparisons(figname='teff17000')
    # try different temperatures - this way we can easily review
    # several comparisons
itf.set_parameter(parname='teff', value=25000.)
itf.populate_comparisons()
itf.plot_all_comparisons(figname='teff25000')
itf.set_parameter(parname='teff', value=13000.)
itf.populate_comparisons()
itf.plot_all_comparisons(figname='teff13000')
itf.save('initial.itf')
# if we want to fit interactively, parameter by parameter
# it is easier to use the save/load mechanism
# itf = pyterpol.Interface.load('tefffit.itf')
# itf = pyterpol.Interface.load('vrotfit.itf')
itf = pyterpol.Interface.load('loggfit.itf')
# choose a fitter
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
# change another parameter
# itf.set_parameter(parname='vrot', vmin=120., vmax=200., fitted=True)
# itf.set_parameter(parname='logg', vmin=3.5, vmax=4.5, fitted=True)
itf.set_parameter(parname='z', vmin=0.5, vmax=2.0, fitted=True)
itf.set_parameter(parname='teff', vmin=15000., fitted=True)
itf.run_fit()
# get the result
# itf.plot_all_comparisons(figname='vrotfit')
itf.plot_all_comparisons(figname='zfit')
# itf.write_fitted_parameters(outputname='iter03.res')
itf.write_fitted_parameters(outputname='iter05.res')
# save a new Interface
# itf.save('vrotfit.itf')
itf.save('zfit.itf')
| 2,974 | 28.455446 | 90 | py |
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas_2/v746cas_2.py | """
V746Cas - fitting of observed spectra.
This example also shows how we can proceed if
we want to fit parameters step by step.
"""
import pyterpol
import numpy as np
import matplotlib.pyplot as plt
def inspect_spectra(f):
"""
Plots all spectra.
:param f:
:return:
"""
ifile = open(f, 'r')
slist = ifile.readlines()
ifile.close()
for rec in slist:
ifile = open(rec.rstrip('\n'), 'r')
ifile.readline()
x, y = np.loadtxt(ifile, unpack=True, usecols=[0,1])
plt.plot(x, y, '-')
plt.show()
def read_obs_from_list(f):
"""
Create a list of observations.
:param f:
:return:
"""
ifile = open(f, 'r')
slist = ifile.readlines()
ifile.close()
obs = []
for i,rec in enumerate(slist[:]):
o = pyterpol.ObservedSpectrum(filename=rec.rstrip('\n'), group=dict(rv=i))
o.get_sigma_from_continuum(cmin=6620., cmax=6640., store=True)
obs.append(o)
return obs
def setup_interface_more_obs():
    # read the observations from spec.lis and attach them
ol = pyterpol.ObservedList()
obs = read_obs_from_list('spec.lis')
ol.add_observations(obs)
# define a starlist
sl = pyterpol.StarList()
sl.add_component(component='primary', teff=16000., logg=3.9, vrot=95., z=1.2, lr=1.0)
# define regions
rl = pyterpol.RegionList()
rl.add_region(wmin=6340, wmax=6410, groups=dict(lr=0))
rl.add_region(wmin=6540, wmax=6595, groups=dict(lr=0))
rl.add_region(wmin=6670, wmax=6685, groups=dict(lr=0))
# create interfaces
itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol, debug=False)
# set fit order = 2 to do it fast
itf.set_grid_properties(order=2, step=0.05)
itf.setup()
# save the session
itf.save('initial.itf')
def optimize_rv(session0, session1):
"""
Optimizes RV.
:return:
"""
# setup the spectra
itf = pyterpol.Interface.load(session0)
# set parameters
itf.set_parameter(parname='rv', fitted=True, vmin=-60., vmax=60.)
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-6)
# run fit
itf.run_fit()
# plot every comparison
itf.plot_all_comparisons(figname='rvfit')
# save the fit
itf.save(session1)
def optimize_all(session0, session1):
"""
Optimizes all parameters
:return:
"""
# setup the spectra
itf = pyterpol.Interface.load(session0)
# itf.set_one_for_all(True)
itf.set_parameter(parname='rv', fitted=True, vmin=-60., vmax=60.)
itf.set_parameter(parname='teff', fitted=True, vmin=15000., vmax=17000.)
itf.set_parameter(parname='logg', fitted=True, vmin=3.7, vmax=4.2)
itf.set_parameter(parname='vrot', fitted=True, vmin=80., vmax=160.)
itf.set_parameter(parname='z', fitted=True, vmin=1.0, vmax=2.0)
itf.choose_fitter('nlopt_nelder_mead', ftol=1e-5)
# run fit
itf.run_fit()
# plot every comparison
itf.plot_all_comparisons(figname='nmallfit')
# save the fit
itf.save(session1)
return itf
# setup the interface
setup_interface_more_obs()
# run the optimization
itf = optimize_all('initial.itf', 'nmallfit_newinit.itf')
# plot the comparisons found with the minimizer
#itf.plot_all_comparisons()
# set errors for the MC estimation - they should lie within the interval;
# there is no point in fitting z, since it is converging out of the
# grid.
#itf.set_error(parname='rv', error=10.)
#itf.set_one_for_all(True)
#itf.set_parameter(parname='teff', vmin=15000., vmax=16500.)
#itf.set_parameter(parname='logg', vmin=3.5, vmax=4.2)
#itf.set_parameter(parname='vrot', vmin=120., vmax=160.)
#itf.set_parameter(parname='z', value=2.0, fitted=False)
#itf.run_mcmc(chain_file='chain.dat', niter=200)
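# --- Illustrative sketch (added, hedged): the commented-out MCMC stage above,
# wrapped as a helper that can be called after the Nelder-Mead fit; the error
# value and the number of iterations are placeholders taken from the comments.
def run_mcmc_stage(itf, chain_file='chain.dat', niter=200):
    # set the assumed RV uncertainty that defines the sampling interval
    itf.set_error(parname='rv', error=10.)
    # sample the posterior; the chain is then inspected with v746cas_eval_mcmc.py
    itf.run_mcmc(chain_file=chain_file, niter=niter)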
| 3,716 | 24.993007 | 89 | py |
pyterpol | pyterpol-master/pyterpol_examples/observed_spectra_fitting/v746cas_2/v746cas_eval_mcmc.py | import pyterpol
# check convergence of individual parameters
pyterpol.Interface.plot_convergence_mcmc('chain.dat', figname='mcmc_convergence.png')
# plot covariance of radiative parameters
pyterpol.Interface.plot_covariances_mcmc('chain.dat', parameters=['vrot', 'teff', 'logg'], figname='mcmc_correlations.png')
# plot variance of rvs
pyterpol.Interface.plot_variances_mcmc('chain.dat', parameters=['rv'], figname='rv_var')
# write result
pyterpol.Interface.write_mc_result('chain.dat', outputname='mcmc.res') | 514 | 38.615385 | 123 | py |