For those interested, here is the full source of hmmlearn's hmm.py module:

# Hidden Markov Models
#
# Author: Ron Weiss <ronweiss@gmail.com>
#         Shiqiao Du <lucidfrontier.45@gmail.com>
# API changes: Jaques Grobler <jaquesgrobler@gmail.com>
# Modifications to create the HMMLearn module: Gael Varoquaux
# More API changes: Sergei Lebedev <superbobry@gmail.com>

"""
The :mod:`hmmlearn.hmm` module implements hidden Markov models.
"""

import numpy as np
from scipy.special import logsumexp
from sklearn import cluster
from sklearn.utils import check_random_state

from . import _utils
from .stats import log_multivariate_normal_density
from .base import _BaseHMM
from .utils import iter_from_X_lengths, normalize, fill_covars

__all__ = ["GMMHMM", "GaussianHMM", "MultinomialHMM"]

COVARIANCE_TYPES = frozenset(("spherical", "diag", "full", "tied"))


class GaussianHMM(_BaseHMM):
    r"""Hidden Markov Model with Gaussian emissions.

    Parameters
    ----------
    n_components : int
        Number of states.

    covariance_type : string, optional
        String describing the type of covariance parameters to
        use.  Must be one of

        * "spherical" --- each state uses a single variance value that
          applies to all features.
        * "diag" --- each state uses a diagonal covariance matrix.
        * "full" --- each state uses a full (i.e. unrestricted)
          covariance matrix.
        * "tied" --- all states use **the same** full covariance matrix.

        Defaults to "diag".

    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.

    startprob_prior : array, shape (n_components, ), optional
        Parameters of the Dirichlet prior distribution for
        :attr:`startprob_`.

    transmat_prior : array, shape (n_components, n_components), optional
        Parameters of the Dirichlet prior distribution for each row
        of the transition probabilities :attr:`transmat_`.

    means_prior, means_weight : array, shape (n_components, ), optional
        Mean and precision of the Normal prior distribution for
        :attr:`means_`.

    covars_prior, covars_weight : array, shape (n_components, ), optional
        Parameters of the prior distribution for the covariance matrix
        :attr:`covars_`.

        If :attr:`covariance_type` is "spherical" or "diag" the prior is
        the inverse gamma distribution, otherwise --- the inverse Wishart
        distribution.

    algorithm : string, optional
        Decoder algorithm. Must be one of "viterbi" or "map".
        Defaults to "viterbi".

    random_state : RandomState or an int seed, optional
        A random number generator instance.

    n_iter : int, optional
        Maximum number of iterations to perform.

    tol : float, optional
        Convergence threshold. EM will stop if the gain in log-likelihood
        is below this value.

    verbose : bool, optional
        When ``True`` per-iteration convergence reports are printed
        to :data:`sys.stderr`. You can diagnose convergence via the
        :attr:`monitor_` attribute.

    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 's' for startprob,
        't' for transmat, 'm' for means and 'c' for covars. Defaults
        to all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to
        training.  Can contain any combination of 's' for
        startprob, 't' for transmat, 'm' for means and 'c' for covars.
        Defaults to all parameters.

    Attributes
    ----------
    n_features : int
        Dimensionality of the Gaussian emissions.

    monitor\_ : ConvergenceMonitor
        Monitor object used to check the convergence of EM.

    transmat\_ : array, shape (n_components, n_components)
        Matrix of transition probabilities between states.

    startprob\_ : array, shape (n_components, )
        Initial state occupation distribution.

    means\_ : array, shape (n_components, n_features)
        Mean parameters for each state.

    covars\_ : array
        Covariance parameters for each state.

        The shape depends on :attr:`covariance_type`::

            (n_components, )                        if "spherical",
            (n_features, n_features)                if "tied",
            (n_components, n_features)              if "diag",
            (n_components, n_features, n_features)  if "full"

    Examples
    --------
    >>> from hmmlearn.hmm import GaussianHMM
    >>> GaussianHMM(n_components=2)
    ...                             #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    GaussianHMM(algorithm='viterbi',...
    """
    def __init__(self, n_components=1, covariance_type='diag',
                 min_covar=1e-3,
                 startprob_prior=1.0, transmat_prior=1.0,
                 means_prior=0, means_weight=0,
                 covars_prior=1e-2, covars_weight=1,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, tol=1e-2, verbose=False,
                 params="stmc", init_params="stmc"):
        _BaseHMM.__init__(self, n_components,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior, algorithm=algorithm,
                          random_state=random_state, n_iter=n_iter,
                          tol=tol, params=params, verbose=verbose,
                          init_params=init_params)

        self.covariance_type = covariance_type
        self.min_covar = min_covar
        self.means_prior = means_prior
        self.means_weight = means_weight
        self.covars_prior = covars_prior
        self.covars_weight = covars_weight

    @property
    def covars_(self):
        """Return covars as a full matrix."""
        return fill_covars(self._covars_, self.covariance_type,
                           self.n_components, self.n_features)

    @covars_.setter
    def covars_(self, covars):
        self._covars_ = np.asarray(covars).copy()

    def _check(self):
        super(GaussianHMM, self)._check()

        self.means_ = np.asarray(self.means_)
        self.n_features = self.means_.shape[1]

        if self.covariance_type not in COVARIANCE_TYPES:
            raise ValueError('covariance_type must be one of {0}'
                             .format(COVARIANCE_TYPES))

        _utils._validate_covars(self._covars_, self.covariance_type,
                                self.n_components)

    def _init(self, X, lengths=None):
        super(GaussianHMM, self)._init(X, lengths=lengths)

        _, n_features = X.shape
        if hasattr(self, 'n_features') and self.n_features != n_features:
            raise ValueError('Unexpected number of dimensions, got %s but '
                             'expected %s' % (n_features, self.n_features))

        self.n_features = n_features
        if 'm' in self.init_params or not hasattr(self, "means_"):
            kmeans = cluster.KMeans(n_clusters=self.n_components,
                                    random_state=self.random_state)
            kmeans.fit(X)
            self.means_ = kmeans.cluster_centers_
        if 'c' in self.init_params or not hasattr(self, "covars_"):
            cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
            if not cv.shape:
                cv.shape = (1, 1)
            self._covars_ = \
                _utils.distribute_covar_matrix_to_match_covariance_type(
                    cv, self.covariance_type, self.n_components).copy()

    def _compute_log_likelihood(self, X):
        return log_multivariate_normal_density(
            X, self.means_, self._covars_, self.covariance_type)

    def _generate_sample_from_state(self, state, random_state=None):
        random_state = check_random_state(random_state)
        return random_state.multivariate_normal(
            self.means_[state], self.covars_[state])

    def _initialize_sufficient_statistics(self):
        stats = super(GaussianHMM, self)._initialize_sufficient_statistics()
        stats['post'] = np.zeros(self.n_components)
        stats['obs'] = np.zeros((self.n_components, self.n_features))
        stats['obs**2'] = np.zeros((self.n_components, self.n_features))
        if self.covariance_type in ('tied', 'full'):
            stats['obs*obs.T'] = np.zeros((self.n_components,
                                           self.n_features, self.n_features))
        return stats

    def _accumulate_sufficient_statistics(self, stats, obs, framelogprob,
                                          posteriors, fwdlattice, bwdlattice):
        super(GaussianHMM, self)._accumulate_sufficient_statistics(
            stats, obs, framelogprob, posteriors, fwdlattice, bwdlattice)

        if 'm' in self.params or 'c' in self.params:
            stats['post'] += posteriors.sum(axis=0)
            stats['obs'] += np.dot(posteriors.T, obs)

        if 'c' in self.params:
            if self.covariance_type in ('spherical', 'diag'):
                stats['obs**2'] += np.dot(posteriors.T, obs ** 2)
            elif self.covariance_type in ('tied', 'full'):
                # posteriors: (nt, nc); obs: (nt, nf); obs: (nt, nf)
                # -> (nc, nf, nf)
                stats['obs*obs.T'] += np.einsum(
                    'ij,ik,il->jkl', posteriors, obs, obs)

    def _do_mstep(self, stats):
        super(GaussianHMM, self)._do_mstep(stats)

        means_prior = self.means_prior
        means_weight = self.means_weight

        # TODO: find a proper reference for estimates for different
        #       covariance models.
        # Based on Huang, Acero, Hon, "Spoken Language Processing",
        # p. 443 - 445
        denom = stats['post'][:, np.newaxis]
        if 'm' in self.params:
            self.means_ = ((means_weight * means_prior + stats['obs'])
                           / (means_weight + denom))

        if 'c' in self.params:
            covars_prior = self.covars_prior
            covars_weight = self.covars_weight
            meandiff = self.means_ - means_prior

            if self.covariance_type in ('spherical', 'diag'):
                cv_num = (means_weight * meandiff**2
                          + stats['obs**2']
                          - 2 * self.means_ * stats['obs']
                          + self.means_**2 * denom)
                cv_den = max(covars_weight - 1, 0) + denom
                self._covars_ = \
                    (covars_prior + cv_num) / np.maximum(cv_den, 1e-5)
                if self.covariance_type == 'spherical':
                    self._covars_ = np.tile(
                        self._covars_.mean(1)[:, np.newaxis],
                        (1, self._covars_.shape[1]))
            elif self.covariance_type in ('tied', 'full'):
                cv_num = np.empty((self.n_components, self.n_features,
                                   self.n_features))
                for c in range(self.n_components):
                    obsmean = np.outer(stats['obs'][c], self.means_[c])

                    cv_num[c] = (means_weight * np.outer(meandiff[c],
                                                         meandiff[c])
                                 + stats['obs*obs.T'][c]
                                 - obsmean - obsmean.T
                                 + np.outer(self.means_[c], self.means_[c])
                                 * stats['post'][c])
                cvweight = max(covars_weight - self.n_features, 0)
                if self.covariance_type == 'tied':
                    self._covars_ = ((covars_prior + cv_num.sum(axis=0)) /
                                     (cvweight + stats['post'].sum()))
                elif self.covariance_type == 'full':
                    self._covars_ = ((covars_prior + cv_num) /
                                     (cvweight + stats['post'][:, None, None]))
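Not part of the module above, but a minimal usage sketch may help here: fit, predict, and score all come from _BaseHMM, while GaussianHMM only supplies the Gaussian emission pieces (_compute_log_likelihood, the sufficient statistics, and the M-step). The data below is made up for illustration, and the snippet assumes the hmmlearn 0.2.x-era API shown in this listing.

    import numpy as np
    from hmmlearn.hmm import GaussianHMM

    # Two synthetic 1-D regimes concatenated into one observation sequence.
    X = np.concatenate([np.random.randn(100, 1),
                        5.0 + np.random.randn(100, 1)])

    model = GaussianHMM(n_components=2, covariance_type="diag", n_iter=100)
    model.fit(X)               # EM (Baum-Welch) training
    states = model.predict(X)  # Viterbi decoding of the hidden state sequence
    logprob = model.score(X)   # total log-likelihood of X under the model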
class MultinomialHMM(_BaseHMM):
    r"""Hidden Markov Model with multinomial (discrete) emissions

    Parameters
    ----------
    n_components : int
        Number of states.

    startprob_prior : array, shape (n_components, ), optional
        Parameters of the Dirichlet prior distribution for
        :attr:`startprob_`.

    transmat_prior : array, shape (n_components, n_components), optional
        Parameters of the Dirichlet prior distribution for each row
        of the transition probabilities :attr:`transmat_`.

    algorithm : string, optional
        Decoder algorithm. Must be one of "viterbi" or "map".
        Defaults to "viterbi".

    random_state : RandomState or an int seed, optional
        A random number generator instance.

    n_iter : int, optional
        Maximum number of iterations to perform.

    tol : float, optional
        Convergence threshold. EM will stop if the gain in log-likelihood
        is below this value.

    verbose : bool, optional
        When ``True`` per-iteration convergence reports are printed
        to :data:`sys.stderr`. You can diagnose convergence via the
        :attr:`monitor_` attribute.

    params : string, optional
        Controls which parameters are updated in the training
        process.  Can contain any combination of 's' for startprob,
        't' for transmat, 'e' for emissionprob.
        Defaults to all parameters.

    init_params : string, optional
        Controls which parameters are initialized prior to
        training.  Can contain any combination of 's' for
        startprob, 't' for transmat, 'e' for emissionprob.
        Defaults to all parameters.

    Attributes
    ----------
    n_features : int
        Number of possible symbols emitted by the model (in the samples).

    monitor\_ : ConvergenceMonitor
        Monitor object used to check the convergence of EM.

    transmat\_ : array, shape (n_components, n_components)
        Matrix of transition probabilities between states.

    startprob\_ : array, shape (n_components, )
        Initial state occupation distribution.

    emissionprob\_ : array, shape (n_components, n_features)
        Probability of emitting a given symbol when in each state.

    Examples
    --------
    >>> from hmmlearn.hmm import MultinomialHMM
    >>> MultinomialHMM(n_components=2)
    ...                             #doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
    MultinomialHMM(algorithm='viterbi',...
    """
    # TODO: accept the prior on emissionprob_ for consistency.
    def __init__(self, n_components=1,
                 startprob_prior=1.0, transmat_prior=1.0,
                 algorithm="viterbi", random_state=None,
                 n_iter=10, tol=1e-2, verbose=False,
                 params="ste", init_params="ste"):
        _BaseHMM.__init__(self, n_components,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm,
                          random_state=random_state,
                          n_iter=n_iter, tol=tol, verbose=verbose,
                          params=params, init_params=init_params)

    def _init(self, X, lengths=None):
        if not self._check_input_symbols(X):
            raise ValueError("expected a sample from "
                             "a Multinomial distribution.")

        super(MultinomialHMM, self)._init(X, lengths=lengths)
        self.random_state = check_random_state(self.random_state)

        if 'e' in self.init_params:
            if not hasattr(self, "n_features"):
                symbols = set()
                for i, j in iter_from_X_lengths(X, lengths):
                    symbols |= set(X[i:j].flatten())
                self.n_features = len(symbols)
            self.emissionprob_ = self.random_state \
                .rand(self.n_components, self.n_features)
            normalize(self.emissionprob_, axis=1)

    def _check(self):
        super(MultinomialHMM, self)._check()

        self.emissionprob_ = np.atleast_2d(self.emissionprob_)
        n_features = getattr(self, "n_features", self.emissionprob_.shape[1])
        if self.emissionprob_.shape != (self.n_components, n_features):
            raise ValueError(
                "emissionprob_ must have shape (n_components, n_features)")
        else:
            self.n_features = n_features

    def _compute_log_likelihood(self, X):
        return np.log(self.emissionprob_)[:, np.concatenate(X)].T

    def _generate_sample_from_state(self, state, random_state=None):
        cdf = np.cumsum(self.emissionprob_[state, :])
        random_state = check_random_state(random_state)
        return [(cdf > random_state.rand()).argmax()]

    def _initialize_sufficient_statistics(self):
        stats = super(MultinomialHMM, self)._initialize_sufficient_statistics()
        stats['obs'] = np.zeros((self.n_components, self.n_features))
        return stats

    def _accumulate_sufficient_statistics(self, stats, X, framelogprob,
                                          posteriors, fwdlattice, bwdlattice):
        super(MultinomialHMM, self)._accumulate_sufficient_statistics(
            stats, X, framelogprob, posteriors, fwdlattice, bwdlattice)
        if 'e' in self.params:
            for t, symbol in enumerate(np.concatenate(X)):
                stats['obs'][:, symbol] += posteriors[t]

    def _do_mstep(self, stats):
        super(MultinomialHMM, self)._do_mstep(stats)
        if 'e' in self.params:
            self.emissionprob_ = (stats['obs']
                                  / stats['obs'].sum(axis=1)[:, np.newaxis])

    def _check_input_symbols(self, X):
        """Check if ``X`` is a sample from a Multinomial distribution.

        That is ``X`` should be an array of non-negative integers from
        range ``[min(X), max(X)]``, such that each integer from the range
        occurs in ``X`` at least once.

        For example ``[0, 0, 2, 1, 3, 1, 1]`` is a valid sample from a
        Multinomial distribution, while ``[0, 0, 3, 5, 10]`` is not.
        """
        symbols = np.concatenate(X)
        if (len(symbols) == 1 or          # not enough data
                symbols.dtype.kind != 'i' or  # not an integer
                (symbols < 0).any()):         # contains negative integers
            return False

        symbols.sort()
        return np.all(np.diff(symbols) <= 1)
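Likewise, a small sketch for MultinomialHMM (hypothetical data, same hmmlearn 0.2.x API assumption). Note that _check_input_symbols above requires a column of non-negative integers in which every symbol between 0 and the maximum occurs at least once.

    import numpy as np
    from hmmlearn.hmm import MultinomialHMM

    # One sequence of discrete symbols {0, 1, 2}, one symbol per row.
    X = np.array([[0], [1], [0], [2], [1], [2], [0], [0]])

    model = MultinomialHMM(n_components=2, n_iter=50)
    model.fit(X)
    print(model.emissionprob_)  # shape (n_components, n_features) == (2, 3)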
class GMMHMM(_BaseHMM):
    r"""Hidden Markov Model with Gaussian mixture emissions.

    Parameters
    ----------
    n_components : int
        Number of states in the model.

    n_mix : int
        Number of mixture components per state.

    covariance_type : string, optional
        String describing the type of covariance parameters to
        use.  Must be one of

        * "spherical" --- each state uses a single variance value that
          applies to all features.
        * "diag" --- each state uses a diagonal covariance matrix.
        * "full" --- each state uses a full (i.e. unrestricted)
          covariance matrix.
        * "tied" --- all states use **the same** full covariance matrix.

        Defaults to "diag".

    min_covar : float, optional
        Floor on the diagonal of the covariance matrix to prevent
        overfitting. Defaults to 1e-3.

    startprob_prior : array, shape (n_components, ), optional
        Parameters of the Dirichlet prior distribution for
        :attr:`startprob_`.

    transmat_prior : array, shape (n_components, n_components), optional
        Parameters of the Dirichlet prior distribution for each row
        of the transition probabilities :attr:`transmat_`.

    weights_prior : array, shape (n_mix, ), optional
        Parameters of the Dirichlet prior distribution for
        :attr:`weights_`.

    means_prior, means_weight : array, shape (n_mix, ), optional
        Mean and precision of the Normal prior distribution for
        :attr:`means_`.

    covars_prior, covars_weight : array, shape (n_mix, ), optional
        Parameters of the prior distribution for the covariance matrix
        :attr:`covars_`.

        If :attr:`covariance_type` is "spherical" or "diag" the prior is
        the inverse gamma distribution, otherwise --- the inverse Wishart
        distribution.

    algorithm : string, optional
        Decoder algorithm. Must be one of "viterbi" or "map".
        Defaults to "viterbi".

    random_state : RandomState or an int seed, optional
        A random number generator instance.

    n_iter : int, optional
        Maximum number of iterations to perform.

    tol : float, optional
        Convergence threshold. EM will stop if the gain in log-likelihood
        is below this value.

    verbose : bool, optional
        When ``True`` per-iteration convergence reports are printed
        to :data:`sys.stderr`. You can diagnose convergence via the
        :attr:`monitor_` attribute.

    init_params : string, optional
        Controls which parameters are initialized prior to training. Can
        contain any combination of 's' for startprob, 't' for transmat, 'm'
        for means, 'c' for covars, and 'w' for GMM mixing weights.
        Defaults to all parameters.

    params : string, optional
        Controls which parameters are updated in the training process.  Can
        contain any combination of 's' for startprob, 't' for transmat, 'm'
        for means, 'c' for covars, and 'w' for GMM mixing weights.
        Defaults to all parameters.

    Attributes
    ----------
    monitor\_ : ConvergenceMonitor
        Monitor object used to check the convergence of EM.

    startprob\_ : array, shape (n_components, )
        Initial state occupation distribution.

    transmat\_ : array, shape (n_components, n_components)
        Matrix of transition probabilities between states.

    weights\_ : array, shape (n_components, n_mix)
        Mixture weights for each state.

    means\_ : array, shape (n_components, n_mix, n_features)
        Mean parameters for each mixture component in each state.

    covars\_ : array
        Covariance parameters for each mixture component in each state.

        The shape depends on :attr:`covariance_type`::

            (n_components, n_mix)                          if "spherical",
            (n_components, n_features, n_features)         if "tied",
            (n_components, n_mix, n_features)              if "diag",
            (n_components, n_mix, n_features, n_features)  if "full"
    """

    def __init__(self, n_components=1, n_mix=1,
                 min_covar=1e-3, startprob_prior=1.0, transmat_prior=1.0,
                 weights_prior=1.0, means_prior=0.0, means_weight=0.0,
                 covars_prior=None, covars_weight=None,
                 algorithm="viterbi", covariance_type="diag",
                 random_state=None, n_iter=10, tol=1e-2,
                 verbose=False, params="stmcw",
                 init_params="stmcw"):
        _BaseHMM.__init__(self, n_components,
                          startprob_prior=startprob_prior,
                          transmat_prior=transmat_prior,
                          algorithm=algorithm, random_state=random_state,
                          n_iter=n_iter, tol=tol, verbose=verbose,
                          params=params, init_params=init_params)
        self.covariance_type = covariance_type
        self.min_covar = min_covar
        self.n_mix = n_mix
        self.weights_prior = weights_prior
        self.means_prior = means_prior
        self.means_weight = means_weight
        self.covars_prior = covars_prior
        self.covars_weight = covars_weight

    def _init(self, X, lengths=None):
        super(GMMHMM, self)._init(X, lengths=lengths)

        _n_samples, self.n_features = X.shape

        # Default values for covariance prior parameters
        self._init_covar_priors()
        self._fix_priors_shape()

        main_kmeans = cluster.KMeans(n_clusters=self.n_components,
                                     random_state=self.random_state)
        labels = main_kmeans.fit_predict(X)
        kmeanses = []
        for label in range(self.n_components):
            kmeans = cluster.KMeans(n_clusters=self.n_mix,
                                    random_state=self.random_state)
            kmeans.fit(X[np.where(labels == label)])
            kmeanses.append(kmeans)

        if 'w' in self.init_params or not hasattr(self, "weights_"):
            self.weights_ = (np.ones((self.n_components, self.n_mix)) /
                             (np.ones((self.n_components, 1)) * self.n_mix))

        if 'm' in self.init_params or not hasattr(self, "means_"):
            self.means_ = np.zeros((self.n_components, self.n_mix,
                                    self.n_features))
            for i, kmeans in enumerate(kmeanses):
                self.means_[i] = kmeans.cluster_centers_

        if 'c' in self.init_params or not hasattr(self, "covars_"):
            cv = np.cov(X.T) + self.min_covar * np.eye(self.n_features)
            if not cv.shape:
                cv.shape = (1, 1)

            if self.covariance_type == 'tied':
                self.covars_ = np.zeros((self.n_components,
                                         self.n_features, self.n_features))
                self.covars_[:] = cv
            elif self.covariance_type == 'full':
                self.covars_ = np.zeros((self.n_components, self.n_mix,
                                         self.n_features, self.n_features))
                self.covars_[:] = cv
            elif self.covariance_type == 'diag':
                self.covars_ = np.zeros((self.n_components, self.n_mix,
                                         self.n_features))
                self.covars_[:] = np.diag(cv)
            elif self.covariance_type == 'spherical':
                self.covars_ = np.zeros((self.n_components, self.n_mix))
                self.covars_[:] = cv.mean()

    def _init_covar_priors(self):
        if self.covariance_type == "full":
            if self.covars_prior is None:
                self.covars_prior = 0.0
            if self.covars_weight is None:
                self.covars_weight = -(1.0 + self.n_features + 1.0)
        elif self.covariance_type == "tied":
            if self.covars_prior is None:
                self.covars_prior = 0.0
            if self.covars_weight is None:
                self.covars_weight = -(self.n_mix + self.n_features + 1.0)
        elif self.covariance_type == "diag":
            if self.covars_prior is None:
                self.covars_prior = -1.5
            if self.covars_weight is None:
                self.covars_weight = 0.0
        elif self.covariance_type == "spherical":
            if self.covars_prior is None:
                self.covars_prior = -(self.n_mix + 2.0) / 2.0
            if self.covars_weight is None:
                self.covars_weight = 0.0

    def _fix_priors_shape(self):
        # If priors are numbers, this function will make them into a
        # matrix of proper shape
        self.weights_prior = np.broadcast_to(
            self.weights_prior, (self.n_components, self.n_mix)).copy()
        self.means_prior = np.broadcast_to(
            self.means_prior,
            (self.n_components, self.n_mix, self.n_features)).copy()
        self.means_weight = np.broadcast_to(
            self.means_weight,
            (self.n_components, self.n_mix)).copy()

        if self.covariance_type == "full":
            self.covars_prior = np.broadcast_to(
                self.covars_prior,
                (self.n_components, self.n_mix,
                 self.n_features, self.n_features)).copy()
            self.covars_weight = np.broadcast_to(
                self.covars_weight, (self.n_components, self.n_mix)).copy()
        elif self.covariance_type == "tied":
            self.covars_prior = np.broadcast_to(
                self.covars_prior,
                (self.n_components, self.n_features, self.n_features)).copy()
            self.covars_weight = np.broadcast_to(
                self.covars_weight, self.n_components).copy()
        elif self.covariance_type == "diag":
            self.covars_prior = np.broadcast_to(
                self.covars_prior,
                (self.n_components, self.n_mix, self.n_features)).copy()
            self.covars_weight = np.broadcast_to(
                self.covars_weight,
                (self.n_components, self.n_mix, self.n_features)).copy()
        elif self.covariance_type == "spherical":
            self.covars_prior = np.broadcast_to(
                self.covars_prior, (self.n_components, self.n_mix)).copy()
            self.covars_weight = np.broadcast_to(
                self.covars_weight, (self.n_components, self.n_mix)).copy()

    def _check(self):
        super(GMMHMM, self)._check()

        if not hasattr(self, "n_features"):
            self.n_features = self.means_.shape[2]

        self._init_covar_priors()
        self._fix_priors_shape()

        # Checking covariance type
        if self.covariance_type not in COVARIANCE_TYPES:
            raise ValueError("covariance_type must be one of {0}"
                             .format(COVARIANCE_TYPES))

        self.weights_ = np.array(self.weights_)
        # Checking mixture weights' shape
        if self.weights_.shape != (self.n_components, self.n_mix):
            raise ValueError("mixture weights must have shape "
                             "(n_components, n_mix), "
                             "actual shape: {0}".format(self.weights_.shape))

        # Checking mixture weights' mathematical correctness
        if not np.allclose(np.sum(self.weights_, axis=1),
                           np.ones(self.n_components)):
            raise ValueError("mixture weights must sum up to 1")

        # Checking means' shape
        self.means_ = np.array(self.means_)
        if self.means_.shape != (self.n_components, self.n_mix,
                                 self.n_features):
            raise ValueError("mixture means must have shape "
                             "(n_components, n_mix, n_features), "
                             "actual shape: {0}".format(self.means_.shape))

        # Checking covariances' shape
        self.covars_ = np.array(self.covars_)
        covars_shape = self.covars_.shape
        needed_shapes = {
            "spherical": (self.n_components, self.n_mix),
            "tied": (self.n_components, self.n_features, self.n_features),
            "diag": (self.n_components, self.n_mix, self.n_features),
            "full": (self.n_components, self.n_mix,
                     self.n_features, self.n_features)
        }
        needed_shape = needed_shapes[self.covariance_type]
        if covars_shape != needed_shape:
            raise ValueError("{0!r} mixture covars must have shape {1}, "
                             "actual shape: {2}"
                             .format(self.covariance_type,
                                     needed_shape, covars_shape))

        # Checking covariances' mathematical correctness
        from scipy import linalg

        if (self.covariance_type == "spherical" or
                self.covariance_type == "diag"):
            if np.any(self.covars_ <= 0):
                raise ValueError("{!r} mixture covars must be non-negative"
                                 .format(self.covariance_type))
        elif self.covariance_type == "tied":
            for i, covar in enumerate(self.covars_):
                if (not np.allclose(covar, covar.T) or
                        np.any(linalg.eigvalsh(covar) <= 0)):
                    raise ValueError("'tied' mixture covars must be "
                                     "symmetric, positive-definite")
        elif self.covariance_type == "full":
            for i, mix_covars in enumerate(self.covars_):
                for j, covar in enumerate(mix_covars):
                    if (not np.allclose(covar, covar.T) or
                            np.any(linalg.eigvalsh(covar) <= 0)):
                        raise ValueError(
                            "'full' covariance matrix of "
                            "mixture {0} of component {1} must be "
                            "symmetric, positive-definite".format(j, i))

    def _generate_sample_from_state(self, state, random_state=None):
        if random_state is None:
            random_state = self.random_state
        random_state = check_random_state(random_state)

        cur_weights = self.weights_[state]
        i_gauss = random_state.choice(self.n_mix, p=cur_weights)
        if self.covariance_type == 'tied':
            # self.covars_.shape == (n_components, n_features, n_features)
            # shouldn't that be (n_mix, ...)?
            covs = self.covars_
        else:
            covs = self.covars_[:, i_gauss]
            covs = fill_covars(covs, self.covariance_type,
                               self.n_components, self.n_features)
        return random_state.multivariate_normal(
            self.means_[state, i_gauss], covs[state])

    def _compute_log_weighted_gaussian_densities(self, X, i_comp):
        cur_means = self.means_[i_comp]
        cur_covs = self.covars_[i_comp]
        if self.covariance_type == 'spherical':
            cur_covs = cur_covs[:, np.newaxis]
        log_cur_weights = np.log(self.weights_[i_comp])

        return log_multivariate_normal_density(
            X, cur_means, cur_covs, self.covariance_type) + log_cur_weights

    def _compute_log_likelihood(self, X):
        n_samples, _ = X.shape
        res = np.zeros((n_samples, self.n_components))

        for i in range(self.n_components):
            log_denses = self._compute_log_weighted_gaussian_densities(X, i)
            with np.errstate(under="ignore"):
                res[:, i] = logsumexp(log_denses, axis=1)

        return res

    def _initialize_sufficient_statistics(self):
        stats = super(GMMHMM, self)._initialize_sufficient_statistics()
        stats['n_samples'] = 0
        stats['post_comp_mix'] = None
        stats['post_mix_sum'] = np.zeros((self.n_components, self.n_mix))
        stats['post_sum'] = np.zeros(self.n_components)
        stats['samples'] = None
        stats['centered'] = None
        return stats

    def _accumulate_sufficient_statistics(self, stats, X, framelogprob,
                                          post_comp, fwdlattice, bwdlattice):
        # TODO: support multiple frames

        super(GMMHMM, self)._accumulate_sufficient_statistics(
            stats, X, framelogprob, post_comp, fwdlattice, bwdlattice)

        n_samples, _ = X.shape

        stats['n_samples'] = n_samples
        stats['samples'] = X

        prob_mix = np.zeros((n_samples, self.n_components, self.n_mix))
        for p in range(self.n_components):
            log_denses = self._compute_log_weighted_gaussian_densities(X, p)
            with np.errstate(under="ignore"):
                prob_mix[:, p, :] = np.exp(log_denses) + np.finfo(float).eps

        prob_mix_sum = np.sum(prob_mix, axis=2)
        post_mix = prob_mix / prob_mix_sum[:, :, np.newaxis]
        post_comp_mix = post_comp[:, :, np.newaxis] * post_mix
        stats['post_comp_mix'] = post_comp_mix

        stats['post_mix_sum'] = np.sum(post_comp_mix, axis=0)
        stats['post_sum'] = np.sum(post_comp, axis=0)

        stats['centered'] = X[:, np.newaxis, np.newaxis, :] - self.means_

    def _do_mstep(self, stats):
        super(GMMHMM, self)._do_mstep(stats)

        n_samples = stats['n_samples']
        n_features = self.n_features

        # Maximizing weights
        alphas_minus_one = self.weights_prior - 1
        new_weights_numer = stats['post_mix_sum'] + alphas_minus_one
        new_weights_denom = (
            stats['post_sum'] + np.sum(alphas_minus_one, axis=1)
        )[:, np.newaxis]
        new_weights = new_weights_numer / new_weights_denom

        # Maximizing means
        lambdas, mus = self.means_weight, self.means_prior
        new_means_numer = np.einsum(
            'ijk,il->jkl', stats['post_comp_mix'], stats['samples']
        ) + lambdas[:, :, np.newaxis] * mus
        new_means_denom = (stats['post_mix_sum'] + lambdas)[:, :, np.newaxis]
        new_means = new_means_numer / new_means_denom

        # Maximizing covariances
        centered_means = self.means_ - mus

        if self.covariance_type == 'full':
            centered = stats['centered'].reshape(
                (n_samples, self.n_components, self.n_mix,
                 self.n_features, 1))
            centered_t = stats['centered'].reshape(
                (n_samples, self.n_components, self.n_mix,
                 1, self.n_features))
            centered_dots = centered * centered_t

            psis_t = np.transpose(self.covars_prior, axes=(0, 1, 3, 2))
            nus = self.covars_weight

            centr_means_resh = centered_means.reshape(
                (self.n_components, self.n_mix, self.n_features, 1))
            centr_means_resh_t = centered_means.reshape(
                (self.n_components, self.n_mix, 1, self.n_features))
            centered_means_dots = centr_means_resh * centr_means_resh_t

            new_cov_numer = np.einsum(
                'ijk,ijklm->jklm', stats['post_comp_mix'], centered_dots
            ) + psis_t + (lambdas[:, :, np.newaxis, np.newaxis] *
                          centered_means_dots)
            new_cov_denom = (
                stats['post_mix_sum'] + 1 + nus + self.n_features + 1
            )[:, :, np.newaxis, np.newaxis]

            new_cov = new_cov_numer / new_cov_denom
        elif self.covariance_type == 'diag':
            centered2 = stats['centered'] ** 2
            centered_means2 = centered_means ** 2

            alphas = self.covars_prior
            betas = self.covars_weight

            new_cov_numer = np.einsum(
                'ijk,ijkl->jkl', stats['post_comp_mix'], centered2
            ) + lambdas[:, :, np.newaxis] * centered_means2 + 2 * betas
            new_cov_denom = (
                stats['post_mix_sum'][:, :, np.newaxis] + 1 + 2 * (alphas + 1))

            new_cov = new_cov_numer / new_cov_denom
        elif self.covariance_type == 'spherical':
            centered_norm2 = np.sum(stats['centered'] ** 2, axis=-1)

            alphas = self.covars_prior
            betas = self.covars_weight

            centered_means_norm2 = np.sum(centered_means ** 2, axis=-1)

            new_cov_numer = np.einsum(
                'ijk,ijk->jk', stats['post_comp_mix'], centered_norm2
            ) + lambdas * centered_means_norm2 + 2 * betas
            new_cov_denom = (n_features * stats['post_mix_sum'] + n_features +
                             2 * (alphas + 1))

            new_cov = new_cov_numer / new_cov_denom
        elif self.covariance_type == 'tied':
            centered = stats['centered'].reshape(
                (n_samples, self.n_components, self.n_mix,
                 self.n_features, 1))
            centered_t = stats['centered'].reshape(
                (n_samples, self.n_components, self.n_mix,
                 1, self.n_features))
            centered_dots = centered * centered_t

            psis_t = np.transpose(self.covars_prior, axes=(0, 2, 1))
            nus = self.covars_weight

            centr_means_resh = centered_means.reshape(
                (self.n_components, self.n_mix, self.n_features, 1))
            centr_means_resh_t = centered_means.reshape(
                (self.n_components, self.n_mix, 1, self.n_features))
            centered_means_dots = centr_means_resh * centr_means_resh_t

            lambdas_cmdots_prod_sum = np.einsum(
                'ij,ijkl->ikl', lambdas, centered_means_dots)

            new_cov_numer = np.einsum(
                'ijk,ijklm->jlm', stats['post_comp_mix'], centered_dots
            ) + lambdas_cmdots_prod_sum + psis_t
            new_cov_denom = (
                stats['post_sum'] + self.n_mix + nus + self.n_features + 1
            )[:, np.newaxis, np.newaxis]

            new_cov = new_cov_numer / new_cov_denom

        # Assigning new values to class members
        self.weights_ = new_weights
        self.means_ = new_means
        self.covars_ = new_cov
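Finally, a sketch for GMMHMM under the same hmmlearn 0.2.x API assumption. The data here is random, so the fit itself is meaningless; the point is only to show the parameter shapes that _check validates above.

    import numpy as np
    from hmmlearn.hmm import GMMHMM

    X = np.random.randn(300, 2)  # 300 samples, 2 features

    model = GMMHMM(n_components=3, n_mix=2, covariance_type="diag", n_iter=20)
    model.fit(X)
    print(model.weights_.shape)  # (n_components, n_mix)             -> (3, 2)
    print(model.means_.shape)    # (n_components, n_mix, n_features) -> (3, 2, 2)
    print(model.covars_.shape)   # "diag": (n_components, n_mix, n_features)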
