#Copyright (C) 2009-2010 Roland Memisevic
#
#This software and any documentation and/or information supplied with
#it is distributed on an as-is basis. Roland Memisevic makes no warranties,
#express or implied, including but not limited to implied warranties
#of merchantability and fitness for a particular purpose, regarding
#the documentation, functions or performance of the software,
#documentation and/or information.
#
#This file contains classes that implement factored higher-order
#Boltzmann machines.
#See:
#Roland Memisevic, Geoffrey Hinton (2010):
#Learning to Represent Spatial Transformations with Factored
#Higher-Order Boltzmann Machines (Neural Computation).

from numpy import zeros, ones, newaxis, array, asarray, double, dot, \
                  concatenate, log, exp, sum, reshape, isnan
import numpy.random


class FactoredGbm(object):
    """Factored gated Boltzmann machine model (base class).

    Gated Boltzmann machine whose three-way weight tensor is factorized
    over the factor index f as

        W_ijk = sum_f wxf[i,f] * wyf[j,f] * whf[k,f]

    See:
    Roland Memisevic, Geoffrey Hinton (2010):
    Learning to Represent Spatial Transformations with Factored
    Higher-Order Boltzmann Machines (Neural Computation).
    """

    def __init__(self, numin, numout, nummap, numfactors,
                 sparsitygain=0.0, targethidprobs=0.2, cditerations=1,
                 meanfield_output=False, premap=None, postmap=None,
                 xy=False, xh=False, yh=True,
                 optlevel=0, momentum=0.9, stepsize=0.001, verbose=True):
        self.optlevel = optlevel
        self.numfactors = numfactors
        self.numin = numin
        self.numout = numout
        self.nummap = nummap
        self.premap = premap
        self.postmap = postmap
        if premap is not None:
            self.numin = self.premap[1].shape[0]
            self.numout = self.postmap[0].shape[1]
        self.sparsitygain = sparsitygain
        self.targethidprobs = targethidprobs
        self.cditerations = cditerations
        self.meanfield_output = meanfield_output
        #flags for the optional pairwise biases referenced in energy_grad:
        self.xy = xy
        self.xh = xh
        self.yh = yh
        #single flat parameter vector, laid out as [wxf | wyf | whf | wy | wh]
        self.params = 0.01*numpy.random.randn(self.numin*self.numfactors +
                                              self.numout*self.numfactors +
                                              self.nummap*self.numfactors +
                                              self.numout +
                                              self.nummap)
        self.numparams = self.params.shape[0]
        self.verbose = verbose
        self.numthreewayparams = numfactors*(numin+numout+nummap)
        #the filter matrices and biases are views into self.params
        self.wxf = self.params[:numin*numfactors].reshape((numin, numfactors))
        self.wyf = self.params[numin*numfactors:
                               numin*numfactors+numout*numfactors
                              ].reshape((numout, numfactors))
        self.whf = self.params[numin*numfactors+numout*numfactors:
                               numin*numfactors+numout*numfactors +
                               nummap*numfactors].reshape((nummap, numfactors))
        self.wy = self.params[self.numthreewayparams:
                              self.numthreewayparams+numout
                             ].reshape((numout, 1))
        self.wh = self.params[self.numthreewayparams+numout:
                              self.numthreewayparams+numout+nummap
                             ].reshape((nummap, 1))
        self.wh *= 0.0
        self.wy *= 0.0
        self.prods_wxf = zeros(self.wxf.shape, 'double')
        self.prods_wyf = zeros(self.wyf.shape, 'double')
        self.prods_whf = zeros(self.whf.shape, 'double')
        self.momentum = momentum
        self.stepsize = stepsize
        self.inc = zeros(self.params.shape, 'double')

    def train(self, *args):
        """Gradient descent with momentum. The last argument is the number
        of steps; the remaining arguments are passed on to grad()."""
        numsteps = args[-1]
        for step in range(numsteps):
            self.inc[:] = self.momentum*self.inc \
                          - self.stepsize*self.grad(*args[:-1])
            if isnan(sum(self.inc)):
                print('nan!')
                self.inc = numpy.zeros(self.inc.shape, 'double')
            self.params += self.inc

    def updateparams(self, newparams):
        self.params[:] = newparams.copy()

    def grad(self, data, weightcost):
        #contrastive-divergence gradient: negative-phase minus positive-phase
        #energy gradients, plus weight decay on the filter matrices
        posgrad = self.energy_grad(self.posdata(data))
        if self.sparsitygain > 0.0:
            sparsitygrad = self.sparsitygrad(self.hids, data)
        else:
            sparsitygrad = 0.0
        neggrad = self.energy_grad(self.negdata(data))
        grad = -posgrad + neggrad
        grad += sparsitygrad
        weightcostgrad_x = weightcost*self.wxf.flatten()
        weightcostgrad_y = weightcost*self.wyf.flatten()
        weightcostgrad_h = weightcost*self.whf.flatten()
        weightcostgrad = concatenate((weightcostgrad_x,
                                      weightcostgrad_y,
                                      weightcostgrad_h))
        grad[:self.numthreewayparams] += weightcostgrad
        #grad[self.numthreewayparams:] *= 0.1 #smaller learningrate for biases
        return grad

    def energy_grad(self, data):
        inputs, hidprobs, outputs = data
        numcases = inputs.shape[1]
        self.prods_wxf *= 0.0
        self.prods_wyf *= 0.0
        self.prods_whf *= 0.0
        factors_x = dot(inputs.T, self.wxf)
        factors_y = dot(outputs.T, self.wyf)
        factors_h = dot(hidprobs.T, self.whf)
        if self.optlevel > 0:
            if self.xy or self.yh or self.xh:
                assert False, "two-way biases not implemented for optlevel>0"
            from scipy import weave
            code = r"""
            for (int c=0; c

        if self.optlevel > 0:
            from scipy import weave
            result = zeros((self.nummap, numcases), 'double')
            code = r"""
            #include
            for(int c=0;c

    def sample_hids(self, hidprobs):
        return (hidprobs > numpy.random.rand(hidprobs.shape[0],
                                             hidprobs.shape[1])).astype(float)

    def sample_obs(self, outprobs):
        return (outprobs > numpy.random.rand(outprobs.shape[0],
                                             outprobs.shape[1])).astype(float)


class FactoredGbmBinGauss(FactoredGbm):

    def __init__(self, numin, numout, nummap, numfactors,
                 sparsitygain=0.0, targethidprobs=0.1, nu=1.0, cditerations=1,
                 premap=None, postmap=None,
                 xy=False, xh=False, yh=True,
                 momentum=0.9, stepsize=0.01, verbose=0):
        FactoredGbm.__init__(self, numin, numout, nummap, numfactors,
                             sparsitygain, targethidprobs,
                             cditerations=cditerations,
                             premap=premap, postmap=postmap,
                             xy=xy, xh=xh, yh=yh, momentum=momentum,
                             stepsize=stepsize, verbose=verbose)
        self.meanfield_output = False
        self.nu = nu

    def hidprobs(self, outputs, inputs):
        if len(outputs.shape) < 2:  #got rank-1 array?
            outputs = outputs[:, newaxis]
        numcases = outputs.shape[1]
        if len(inputs.shape) < 2:   #got rank-1 array?
            inputs = inputs[:, newaxis]
        factors_x = self.factors_x(inputs)
        factors_y = self.factors_y(outputs)
        if self.optlevel > 0:
            from scipy import weave
            result = zeros((self.nummap, numcases), 'double')
            code = r"""
            #include
            #include
            Py_BEGIN_ALLOW_THREADS
            for(int c=0;c

        if self.optlevel > 0:
            from scipy import weave
            result = zeros((self.numout, numcases), 'double')
            code = r"""
            #include
            #include
            Py_BEGIN_ALLOW_THREADS
            for(int c=0;c

    def sample_hids(self, hidprobs):
        return (hidprobs > numpy.random.rand(*hidprobs.shape)).astype(float)

    def sample_obs(self, outprobs):
        return (outprobs + numpy.random.randn(*outprobs.shape))*self.nu
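
#-----------------------------------------------------------------------------
#Minimal usage sketch. This block is an illustration, not part of the original
#module or the paper: the __main__ guard, the layer sizes, and the einsum
#reconstruction of the weight tensor below are all chosen here purely as an
#example. It only exercises the constructor, updateparams(), and the parameter
#layout documented in the class docstring (W_ijk = sum_f wxf[i,f]*wyf[j,f]*whf[k,f]).
if __name__ == "__main__":
    #illustrative sizes, not recommendations
    numin, numout, nummap, numfactors = 8, 8, 10, 12
    model = FactoredGbm(numin, numout, nummap, numfactors)

    #wxf, wyf and whf are views into the single flat parameter vector,
    #so updateparams() changes the filter matrices in place:
    newparams = 0.01*numpy.random.randn(model.numparams)
    model.updateparams(newparams)
    assert numpy.allclose(model.wxf.flatten(), newparams[:numin*numfactors])

    #the factored three-way weight tensor the filter matrices stand for:
    #W[i,j,k] = sum_f wxf[i,f]*wyf[j,f]*whf[k,f]
    W = numpy.einsum('if,jf,kf->ijk', model.wxf, model.wyf, model.whf)
    print(W.shape)   #(numin, numout, nummap)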