diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..0d20b64 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/ClassificationExperiment.py b/ClassificationExperiment.py index 58ee9cf..1520219 100644 --- a/ClassificationExperiment.py +++ b/ClassificationExperiment.py @@ -124,7 +124,7 @@ def MakeGenerator(SampleList,NSamples, FailedLoad=False # Or Build the model from scratch -if not MyModel.Model: +if FailedLoad: import keras print "Building Model...", diff --git a/LoadData.py b/LoadData.py index 434884c..3372867 100644 --- a/LoadData.py +++ b/LoadData.py @@ -94,6 +94,7 @@ def LArIATDataGenerator(FileSearch="/data/LArIAT/*.h5",DownSampleSize=4, ScanWin Files = glob.glob(FileSearch) print "Found",len(Files),"files." + Files.sort() if MaxFiles!=-1: random.shuffle(Files) @@ -184,8 +185,10 @@ def DivideFiles(FileSearch="/data/LArIAT/h5_files/*.h5",Fractions=[.9,.1],datase if __name__ == '__main__': import sys - FileSearch="/data/LArIAT/h5_files/*.h5" - + #FileSearch="/data/LArIAT/h5_files/*.h5" + + # CHANGE TO /data/cloud/project/data/apr_9/2d/*.h5 + FileSearch="/data/cloud/project/data/apr_9/2d/*.h5" try: n_threads=int(sys.argv[1]) except: @@ -208,14 +211,14 @@ def DivideFiles(FileSearch="/data/LArIAT/h5_files/*.h5",Fractions=[.9,.1],datase Normalize=True closefiles=False Train_gen=LArIATDataGenerator(FileSearch=FileSearch, - cachefile="LArIAT-LoadDataTest-Cache.h5", + #cachefile="LArIAT-LoadDataTest-Cache.h5", max=128*10000, batchsize=128, DownSampleSize=DownSampleSize, ScanWindowSize=ScanWindowSize, Norm=Normalize, #shapes=[(128*m, 2, 240, 4096/DownSampleSize), (128*m, 16)], - shapes=[(128*m, 2, 240, ScanWindowSize), (128*m, 16)], + #shapes=[(128*m, 240, ScanWindowSize), (128, 240, 256)], n_threads=n_threads, SharedDataQueueSize=1, multiplier=m, diff --git a/Models.py b/Models.py index fd14f39..ceb3b68 100644 --- a/Models.py +++ b/Models.py @@ -83,5 +83,3 @@ def Build(self): self.modelT=modelT self.Model=Model(MInputs,modelT) - - diff --git a/Recon3DExperiment.py b/Recon3DExperiment.py new file mode 100644 index 0000000..c91e959 --- /dev/null +++ b/Recon3DExperiment.py @@ -0,0 +1,272 @@ +import sys, os, argparse + +# Parse the Arguments +execfile("LArTPCDNN/ClassificationArguments.py") + +# Process the ConfigFile +execfile(ConfigFile) + +# Now put config in the current scope. Must find a prettier way. +if "Config" in dir(): + for a in Config: + exec (a + "=" + str(Config[a])) + +# Use "--Test" to run on less events and epochs. +OutputBase = "TrainedModels" +if TestMode: + MaxEvents = int(20e3) + NTestSamples = int(20e2) + Epochs = 10 + OutputBase += ".Test" + print "Test Mode: Set MaxEvents to", MaxEvents, "and Epochs to", Epochs + +if LowMemMode: + n_threads = 1 + multiplier = 1 + +# Calculate how many events will be used for training/validation. +NSamples = MaxEvents - NTestSamples + + +NSamples = BatchSize + +# Function to help manage optional configurations. Checks and returns +# if an object is in current scope. Return default value if not. +def TestDefaultParam(Config): + def TestParamPrime(param, default=False): + if param in Config: + return eval(param) + else: + return default + + return TestParamPrime + + +TestDefaultParam = TestDefaultParam(dir()) + +# Load the Data. 
TODO +from LArTPCDNN.LoadData import * +from LArTPCDNN.Recon3DLoadData import * + +print "Loading 2D and 3D data" + +Train_gen = combined2D3DGenerator(datapath = "/data/cloud/project/data/apr_9/") + +print "Testing 2D and 3D data load" + +#FileSearch = "apr_9/2d/muon*.h5" +# +#TrainSampleList, TestSampleList = DivideFiles(FileSearch, +# [float(NSamples) / MaxEvents, float(NTestSamples) / MaxEvents], +# datasetnames=[u'features'], +# Particles=Particles) +#bins3d=(240, 240, 256) +# +#shapes = [(BatchSize * multiplier, 240, 4096 / DownSampleSize), +# (BatchSize * multiplier, 240, 4096 / DownSampleSize), +# (BatchSize * multiplier,) + bins3d] +# +#viewshape = (None, 240, 4096 / DownSampleSize) +# +# +#def MakeGenerator(SampleList, NSamples, +# cachefile="LArIAT-LoadDataTest-Cache.h5", **kwargs): +# return DLMultiClassFilterGenerator(TrainSampleList, FilterEnergy(EnergyCut), max=NSamples, +# preprocessfunction=ProcessWireData(DownSampleSize, ScanWindowSize, Normalize), +# postprocessfunction=MergeInputs(), +# batchsize=BatchSize, +# shapes=shapes, +# n_threads=n_threads, +# multiplier=multiplier, +# cachefile=cachefile, +# **kwargs) +# +# +## Use DLGenerators to read data +#Train_genC = MakeGenerator(TrainSampleList, NSamples, +# cachefile="/tmp/LArTPCDNN-LArIAT-TrainEvent-Cache.h5") +# +#Test_genC = MakeGenerator(TestSampleList, NTestSamples, +# cachefile="/tmp/LArTPCDNN-LArIAT-TestEvent-Cache.h5") +# +#print "Train Class Index Map:", Train_genC.ClassIndexMap +# print "Test Class Index Map:", Test_genC.ClassIndexMap + +#Cache = True + +#if Preload: +# print "Caching data in memory for faster processing after first epoch. Hope you have enough memory." +# Train_gen = Train_genC.PreloadGenerator() +# Test_gen = Test_genC.PreloadGenerator() +#elif Cache: +# print "Caching data on disk for faster processing after first epoch. Hope you have enough disk space." +# Train_gen = Train_genC.DiskCacheGenerator(n_threads_cache) +# Test_gen = Test_genC.DiskCacheGenerator(n_threads_cache) +#else: +# Train_gen = Train_genC.Generator() +# Test_gen = Test_genC.Generator() + +# Build/Load the Model +from DLTools.ModelWrapper import ModelWrapper +from LArTPCDNN.Recon3DModels import * + +# You can automatically load the latest previous training of this model. +if TestDefaultParam("LoadPreviousModel") and not LoadModel: + print "Looking for Previous Model to load." + ReconstructionModel = ModelWrapper(Name=Name, LoadPrevious=True, OutputBase=OutputBase) + +# You can load a previous model using "-L" option with the model directory. +if LoadModel: + print "Loading Model From:", LoadModel + if LoadModel[-1] == "/": LoadModel = LoadModel[:-1] + ReconstructionModel = ModelWrapper(Name=os.path.basename(LoadModel), InDir=os.path.dirname(LoadModel), + OutputBase=OutputBase) + ReconstructionModel.Load(LoadModel) + +if not ReconstructionModel.Model: + FailedLoad = True +else: + FailedLoad = False + +# Or Build the model from scratch +if FailedLoad: + import keras + + print "Building Model...", + + View1Shape = (240, 256) + View2Shape = (240, 256) + + ReconstructionModel = Model2DViewsTo3DDense(Name, View1Shape, View2Shape, Width, Depth, + BatchSize, 240*240*256, + init=TestDefaultParam("WeightInitialization", 'normal'), + # activation=TestDefaultParam("activation","relu"), + Dropout=TestDefaultParam("DropoutLayers", 0.5), + BatchNormalization=TestDefaultParam("BatchNormLayers", False), + OutputBase=OutputBase) + + ReconstructionModel.Build() + print " Done." 
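+    # Note (illustrative): the final Dense layer has 240*240*256 = 14,745,600 outputs,
+    # matching the flattened 3D volume that combined2D3DGenerator's MergerGenerator
+    # produces via np.reshape(s3d[0], [128, 240*240*256]); with Width hidden units this
+    # layer alone holds roughly Width * 14.7M weights, so even a modest Width is large.
+    # Illustrative shape check (commented out; the 2D generator is hard-coded to
+    # batches of 128):
+    #   [X1, X2], [Y3D] = next(Train_gen)
+    #   print X1.shape, X2.shape, Y3D.shape  # expected: (128, 240, 256) (128, 240, 256) (128, 14745600)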
+
+print "Output Directory:", ReconstructionModel.OutDir
+# Store the Configuration Dictionary
+ReconstructionModel.MetaData["Configuration"] = Config
+if "HyperParamSet" in dir():
+    ReconstructionModel.MetaData["HyperParamSet"] = HyperParamSet
+
+# Print out the Model Summary
+ReconstructionModel.Model.summary()
+
+# Compile The Model
+print "Compiling Model."
+ReconstructionModel.BuildOptimizer(optimizer, Config)
+ReconstructionModel.Compile(Metrics=["accuracy"])
+
+# Train
+if Train or (RecoverMode and FailedLoad):
+    print "Training."
+    # Setup Callbacks
+    # These are all optional and still being tested with this model.
+    from DLTools.CallBacks import TimeStopping, GracefulExit
+    from keras.callbacks import *
+
+    callbacks = []
+
+    if TestDefaultParam("UseGracefulExit", 0):
+        print "Adding GracefulExit Callback."
+        callbacks.append(GracefulExit())
+
+    if TestDefaultParam("ModelCheckpoint", False):
+        ReconstructionModel.MakeOutputDir()
+        callbacks.append(ModelCheckpoint(ReconstructionModel.OutDir + "/Checkpoint.Weights.h5",
+                                         monitor=TestDefaultParam("monitor", "val_loss"),
+                                         save_best_only=TestDefaultParam("ModelCheckpoint_save_best_only"),
+                                         save_weights_only=TestDefaultParam("ModelCheckpoint_save_weights_only"),
+                                         mode=TestDefaultParam("ModelCheckpoint_mode", "auto"),
+                                         period=TestDefaultParam("ModelCheckpoint_period", 1),
+                                         verbose=0))
+
+    if TestDefaultParam("EarlyStopping"):
+        callbacks.append(EarlyStopping(monitor=TestDefaultParam("monitor", "val_loss"),
+                                       min_delta=TestDefaultParam("EarlyStopping_min_delta", 0.01),
+                                       patience=TestDefaultParam("EarlyStopping_patience"),
+                                       mode=TestDefaultParam("EarlyStopping_mode", 'auto'),
+                                       verbose=0))
+
+    if TestDefaultParam("RunningTime"):
+        print "Setting RunningTime to", RunningTime, "."
+        TSCB = TimeStopping(TestDefaultParam("RunningTime", 3600 * 6), verbose=False)
+        callbacks.append(TSCB)
+
+    # Don't fill the log files with progress bars.
+    if sys.flags.interactive:
+        verbose = 1
+    else:
+        verbose = 1  # TODO: switch to 2 once debugging is done, for one log line per epoch
+
+    # Initial score on the test sample: disabled until a test generator is defined.
+    #print "Evaluating score on test sample..."
+    #score = ReconstructionModel.Model.evaluate_generator(Test_gen, steps=NTestSamples / BatchSize)
+    #print "Initial Score:", score
+    #ReconstructionModel.MetaData["InitialScore"] = score
+
+    ReconstructionModel.History = ReconstructionModel.Model.fit_generator(Train_gen,
+                                                                          steps_per_epoch=(NSamples / BatchSize),
+                                                                          epochs=Epochs,
+                                                                          verbose=verbose,
+                                                                          #validation_data=Test_gen,
+                                                                          #validation_steps=NTestSamples / BatchSize,
+                                                                          #callbacks=callbacks
+                                                                          )
+
+    # Final score on the test sample: Test_gen is not defined yet (the test generators
+    # above are commented out), so this stays disabled for now. See the sketch before
+    # the Analysis block below for one way to wire it in.
+    #print "Evaluating score on test sample..."
+    #score = ReconstructionModel.Model.evaluate_generator(Test_gen, steps=NTestSamples / BatchSize)
+    #print "Final Score:", score
+    #ReconstructionModel.MetaData["FinalScore"] = score
+
+    if TestDefaultParam("RunningTime"):
+        ReconstructionModel.MetaData["EpochTime"] = TSCB.history
+
+    # Store the parameters used for scanning for easier tables later:
+    for k in Params:
+        ReconstructionModel.MetaData[k] = Config[k]
+
+    # Save Model
+    ReconstructionModel.Save()
+else:
+    print "Skipping Training."
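+
+# Note: the Analysis block below still assumes the 2D classification pipeline
+# (MakeGenerator, TestSampleList, Test_genC), which is commented out above, so
+# running with Analyze enabled will fail until a 3D test generator is wired in.
+# A minimal sketch, assuming a separate held-out directory with the same layout
+# exists (the path below is a placeholder):
+#
+#   Test_gen = combined2D3DGenerator(datapath="/data/cloud/project/data/apr_9_test/")
+#   score = ReconstructionModel.Model.evaluate_generator(Test_gen, steps=NTestSamples / BatchSize)
+#   ReconstructionModel.MetaData["FinalScore"] = score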
+ +# Analysis +if Analyze: + Test_genC = MakeGenerator(TestSampleList, NTestSamples, + cachefile=Test_genC.cachefilename) # "/tmp/LArTPCDNN-LArIAT-TestEvent-Cache.h5") + + Test_genC.PreloadData(n_threads_cache) + [Test_X_View1, Test_X_View2], Test_Y = MergeInputs()(tuple(Test_genC.D)) + + from DLAnalysis.Classification import MultiClassificationAnalysis + + result, NewMetaData = MultiClassificationAnalysis(ReconstructionModel, [Test_X_View1, Test_X_View2], + Test_Y, BatchSize, PDFFileName="ROC", + IndexMap=Test_genC.ClassIndexMap) + + ReconstructionModel.MetaData.update(NewMetaData) + + # Save again, in case Analysis put anything into the Model MetaData + if not sys.flags.interactive: + ReconstructionModel.Save() + else: + print "Warning: Interactive Mode. Use ReconstructionModel.Save() to save Analysis Results." + +# Make sure all of the Generators processes and threads are dead. +# Not necessary... but ensures a graceful exit. +# if not sys.flags.interactive: +# for g in GeneratorClasses: +# try: +# g.StopFiller() +# g.StopWorkers() +# except: +# pass diff --git a/Recon3DLoadData.py b/Recon3DLoadData.py new file mode 100644 index 0000000..8d3abab --- /dev/null +++ b/Recon3DLoadData.py @@ -0,0 +1,288 @@ +import h5py +import glob, os, sys, time +import numpy as np + +from DLTools.ThreadedGenerator import DLMultiClassGenerator, DLMultiClassFilterGenerator + +def main(): + #datapath = "/data/datasets/LarTPC/apr_9/" + datapath = "/data/cloud/project/data/apr_9/" + # Pull in datafiles + filelist2d = glob.glob(datapath + "2d/*") + filelist3d = glob.glob(datapath + "3d/*") + filelist2d.sort() + filelist3d.sort() + assert len(filelist2d) == len(filelist3d), "Number of 2D and 3D files mismatch!" + + try: + n_threads = int(sys.argv[1]) + except: + n_threads = 6 + + try: + n_threads2 = int(sys.argv[2]) + except: + n_threads2 = n_threads + + + Train_gen = LarTPCDataGenerator(filelist3d, n_threads=n_threads, max=100000, + bins=(240, 240, 256), verbose=False) + + DownSampleSize=8 + ScanWindowSize=256 + Normalize=True + closefiles=False + m = 1 + + print "Generator Ready" + print "ClassIndex:", Train_gen.ClassIndexMap + print "Object Shape:", Train_gen.shapes + sys.stdout.flush() + + N = 1 + NN = n_threads + count = 0 + old = start = time.time() + for tries in xrange(1): + print "*********************Try:", tries + # for D in Train_gen.Generator(): + # GENERATOR CALLED HERE, FEED THIS TO OUTPUT FOR 3D + for D in Train_gen.Generator(): + NN -= 0 + if NN < 0: + break + start1 = time.time() + Delta = (start1 - start) + Delta2 = (start1 - old) + old = start1 + print count, ":", Delta, ":", Delta / float(N), Delta2 + sys.stdout.flush() + N += 1 + for d in D: + print d.shape + print d[np.where(d != 0.)] + NN = d.shape[0] + # print d[0] + pass + count += NN + + +def combined2D3DGenerator(datapath): + #datapath = "/data/datasets/LarTPC/apr_9/" + # Pull in datafiles + filelist2d = glob.glob(datapath + "2d/*") + filelist3d = glob.glob(datapath + "3d/*") + filelist2d.sort() + filelist3d.sort() + assert len(filelist2d) == len(filelist3d), "Number of 2D and 3D files mismatch!" 
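+    # The 2D/3D pairing below relies on the two sorted lists lining up one-to-one
+    # (e.g. muon_158.2d.h5 with its 3d counterpart) and on both generators emitting
+    # batches in the same order; with n_threads > 1 that ordering is not obviously
+    # guaranteed, so an explicit per-event ID check may be needed if misalignment
+    # is suspected.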
+ + try: + n_threads = int(sys.argv[1]) + except: + n_threads = 6 + + try: + n_threads2 = int(sys.argv[2]) + except: + n_threads2 = n_threads + + + Train_gen3D = LarTPCDataGenerator(filelist3d, n_threads=n_threads, max=100000, + bins=(240, 240, 256), verbose=False) + + DownSampleSize=8 + ScanWindowSize=256 + Normalize=True + closefiles=False + m = 1 + Train_gen2D =LArIATDataGenerator(FileSearch=datapath + "2d/*", + max=128*10000, + batchsize=128, + DownSampleSize=DownSampleSize, + ScanWindowSize=ScanWindowSize, + Norm=Normalize, + #shapes=[(128*m, 2, 240, 4096/DownSampleSize), (128*m, 16)], + #shapes=[(128*m, 240, ScanWindowSize), (128, 240, 256)], + #shapes=[(128*m, 2, 240, ScanWindowSize), (128*m, 16)], + #shapes=[(128*m, 240, ScanWindowSize)], + n_threads=n_threads, + SharedDataQueueSize=1, + multiplier=m, + closefiles=closefiles, + verbose=False, + timing=False, + sleep=1, + Wrap=False) + + + def MergerGenerator(T2D, T3D): + while True: + s2d = T2D.next() + s3d = T3D.next() + yield ([s2d[0], s2d[1]], [np.reshape(s3d[0], [128,240*240*256])]) + + return MergerGenerator(Train_gen2D.Generator(), Train_gen3D.Generator()) + + + +def LArIATDataGenerator(FileSearch="/data/LArIAT/*.h5",DownSampleSize=4, ScanWindowSize=256,EnergyCut=0.61, + datasetnames=[u'features'], Norm=False, MaxFiles=-1, **kwargs): + + print "Searching in :",FileSearch + Files = glob.glob(FileSearch) + + print "Found",len(Files),"files." + Files.sort() + + if MaxFiles!=-1: + random.shuffle(Files) + Files=Files[:MaxFiles] + + Samples=[] + + FileCount=0 + + for F in Files: + FileCount+=1 + basename=os.path.basename(F) + ParticleName=basename.split("_")[0] + + Samples.append((F,datasetnames,ParticleName)) + if MaxFiles>0: + if FileCount>MaxFiles: + break + + GC= DLMultiClassFilterGenerator(Samples, FilterEnergy(EnergyCut), + preprocessfunction=ProcessWireData(DownSampleSize,ScanWindowSize,Norm), + **kwargs) + return GC + + + +def LarTPCDataGenerator(files="/data", is2D = False, batchsize=128, datasets=['images3D/C', 'images3D/V'], Norm=True, + bins=None, EnergyCut=0.61, DownSampleSize = 2, ScanWindowSize = 256, **kwargs): + Samples = [] + + for F in files: + basename = os.path.basename(F) + ParticleName = basename.split("_")[0] + Samples.append((F, datasets, ParticleName)) + + # Samples = [ (Directory+"muon_158.2d.h5", datasets, "data")] + + def MakeImage(bins, Norm=True): + if bins != None: + def f(D): + for i in xrange(D[0].shape[0]): + if Norm: + w = np.tanh(np.sign(D[1][i]) * np.log(np.abs(D[1][i]) + 1.0) / 2.0) + else: + w = D[1][i] + R, b = np.histogramdd(D[0][i], bins=list(bins), weights=w) + return [R] + D[2:] + return f + else: + return False + + if bins == None: + bins = (0,) + + if is2D: + GC= DLMultiClassFilterGenerator(Samples, FilterEnergy(EnergyCut), + preprocessfunction=ProcessWireData(DownSampleSize,ScanWindowSize,Norm), + **kwargs) + + else: + GC = DLMultiClassGenerator(Samples, batchsize=batchsize, + preprocessfunction=MakeImage(bins, False), + OneHot=True, + shapes=[(batchsize,) + bins, (batchsize, 2)], + **kwargs) + + return GC + + +def FilterEnergy(MinEnergy): + def filterfunction(batchdict): + r= np.where(np.array(batchdict['Eng']) > MinEnergy) + return r[0] + + return filterfunction + +def ProcessWireData(DownSampleFactor,ScanWindowSize,Norm=True): + def processfunction(D): + X=D[0] + BatchSize=X.shape[0] + if DownSampleFactor > 1: + X,Ny= DownSample(X,DownSampleFactor,BatchSize) + if ScanWindowSize>0: + #X,i,j=ScanWindow(X,ScanWindowSize,240,Ny) + X=crop_batch(X,ScanWindowSize) + + if Norm: + X = 
np.tanh(np.sign(X) * np.log(np.abs(X) + 1.0) / 2.0) + return [X[:,0,:,:],X[:,1,:,:]] +D[1:] + return processfunction + +# From Peter Sadowski +def crop_example(X, interval, augment=None): + ''' + Crop X by finding time interval with maximal energy. + X = tensor of shape (num_channel, x, y) = (2 channels, 240 wires, time steps) + interval = length of desired time step window + augment = If integer, randomly translate the time window up to this many steps. + ''' + assert len(X.shape) == 3, "Example is expected to be three-dimensional." + energy = np.sum(X, axis=(0,1)) + assert energy.ndim == 1 + cumsum = np.cumsum(energy, dtype='float64') + assert not np.any(np.isnan(cumsum)) + assert np.all(np.isfinite(cumsum)) + intsum = cumsum[interval:] - cumsum[:-interval] + maxstart = np.argmax(intsum) # NOTE: maxend=interval+np.argmax(intsum) + + if augment: + rsteps = np.random.random_integers(-augment, augment) + if rsteps < 0: + maxstart = max(0, maxstart + rsteps) + else: + maxstart = min(len(energy)-interval, maxstart + rsteps) + + return X[:, :, maxstart:maxstart+interval] + +def crop_batch(X, interval, augment=None): + new_X = np.zeros(shape=(X.shape[0],X.shape[1],X.shape[2],interval), dtype='float32') + for i in range(X.shape[0]): + new_X[i,:,:,:] = crop_example(X[i,:,:,:], interval, augment) + return new_X + + +def shuffle_in_unison_inplace(a, b, c=False): + assert len(a) == len(b) + p = np.random.permutation(len(a)) + if type(c) != bool: + return a[p], b[p], c[p] + return a[p], b[p] + +def DownSample(y,factor,batchsize,sumabs=False): + Nx=batchsize + Ny=y.shape[1] + Nz=y.shape[2] + Nw=y.shape[3] + + if factor==0: + return np.reshape(y,[Nx,Ny,Nz,Nw]),Nw + # Remove entries at the end so Down Sampling works + NwNew=Nw-Nw%factor + features1=np.reshape(y,[Nx,Ny,Nz,Nw])[:,:,:,0:NwNew] + # DownSample + if sumabs: + features_Down=abs(features1.reshape([Nz*NwNew/factor,factor])).sum(axis=3).reshape([Nx,Ny,Nz,NwNew/factor]) + else: + features_Down=features1.reshape([Nx,Ny,Nz*NwNew/factor,factor]).sum(axis=3).reshape([Nx,Ny,Nz,NwNew/factor]) + return features_Down, NwNew + + + +if __name__ == '__main__': + main() diff --git a/Recon3DModels.py b/Recon3DModels.py new file mode 100644 index 0000000..79a31d5 --- /dev/null +++ b/Recon3DModels.py @@ -0,0 +1,123 @@ +from DLTools.ModelWrapper import * + +from keras.layers.merge import concatenate +from keras.models import Sequential, Model +from keras.layers.core import Dense, Activation +from keras.layers import BatchNormalization, Dropout, Flatten, Input +from keras.models import model_from_json +from keras.layers import Input, Dense, Conv2D, MaxPooling2D, Conv3D, UpSampling3D +from keras.models import Model +from keras import backend as K +from keras.callbacks import TensorBoard +import numpy as np + + +class Model2DViewsTo3DDense(ModelWrapper): + def __init__(self, Name, View1Shape, View2Shape, width=0, depth=0, BatchSize=2048, N_Classes=0, + init=0, BatchNormalization=False, Dropout=False, **kwargs): + super(Model2DViewsTo3DDense, self).__init__(Name, Loss="categorical_crossentropy", **kwargs) + + self.width = width + self.depth = depth + self.init = init + + self.Dropout = Dropout + self.BatchSize = BatchSize + self.BatchNormalization = BatchNormalization + + self.input1_shape = View1Shape + self.input2_shape = View2Shape + self.N_Classes = N_Classes + + self.MetaData.update({"width": self.width, + "depth": self.depth, + "Dropout": self.Dropout, + "BatchNormalization": BatchNormalization, + "input1_shape": self.input1_shape, + "input2_shape": 
self.input2_shape,
+                              "N_classes": self.N_Classes,
+                              "init": self.init})
+
+    def Build(self):
+        input1 = Input(self.input1_shape)
+        input2 = Input(self.input2_shape)
+        flat1 = Flatten(input_shape=self.input1_shape)(input1)
+        flat2 = Flatten(input_shape=self.input2_shape)(input2)
+        modelT = concatenate([flat1, flat2])
+
+        # model.add(Dense(self.width,init=self.init))
+        modelT = (Activation('relu')(modelT))
+
+        for i in xrange(0, self.depth):
+            if self.BatchNormalization:
+                modelT = BatchNormalization()(modelT)
+
+            modelT = Dense(self.width, kernel_initializer=self.init)(modelT)
+            modelT = Activation('relu')(modelT)  # hidden layers use relu; softmax is reserved for the output layer
+
+            if self.Dropout:
+                modelT = Dropout(self.Dropout)(modelT)
+
+        modelT = Dense(self.N_Classes, activation='softmax', kernel_initializer=self.init)(modelT)
+
+        self.Model = Model(inputs=[input1, input2], outputs=modelT)
+
+
+class Model2DViewsTo3DConv(ModelWrapper):
+    def __init__(self, Name, View1Shape, View2Shape, width=0, depth=0, BatchSize=2048, N_Classes=0,
+                 init=0, BatchNormalization=False, Dropout=False, **kwargs):
+        super(Model2DViewsTo3DConv, self).__init__(Name, Loss="categorical_crossentropy", **kwargs)
+
+        self.width = width
+        self.depth = depth
+        self.init = init
+
+        self.Dropout = Dropout
+        self.BatchSize = BatchSize
+        self.BatchNormalization = BatchNormalization
+
+        self.input1_shape = View1Shape
+        self.input2_shape = View2Shape
+        self.N_Classes = N_Classes
+
+        self.MetaData.update({"width": self.width,
+                              "depth": self.depth,
+                              "Dropout": self.Dropout,
+                              "BatchNormalization": BatchNormalization,
+                              "input1_shape": self.input1_shape,
+                              "input2_shape": self.input2_shape,
+                              "N_classes": self.N_Classes,
+                              "init": self.init})
+
+    def Build(self):
+        input1 = Input(self.input1_shape)
+        input2 = Input(self.input2_shape)
+
+        x = Conv2D(64, (3, 3), strides=(1, 1), activation='relu', padding='same')(input1)
+        x = MaxPooling2D((2, 2), padding='same')(x)
+        x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
+        x = MaxPooling2D((2, 2), padding='same')(x)
+        x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
+        encoded1 = MaxPooling2D((2, 2), padding='same')(x)
+
+        y = Conv2D(64, (3, 3), strides=(1, 1), activation='relu', padding='same')(input2)
+        y = MaxPooling2D((2, 2), padding='same')(y)
+        y = Conv2D(32, (3, 3), activation='relu', padding='same')(y)
+        y = MaxPooling2D((2, 2), padding='same')(y)
+        y = Conv2D(8, (3, 3), activation='relu', padding='same')(y)
+        encoded2 = MaxPooling2D((2, 2), padding='same')(y)
+
+        # Merge the two encoded views; Keras layer outputs must be combined with a
+        # merge layer such as concatenate rather than a raw "+".
+        z = concatenate([encoded1, encoded2])
+
+        # Now decode in 3D.
+        # NOTE: Conv3D expects a 5-D (batch, d, h, w, channels) input, while z here is a
+        # 4-D 2D feature map; a Reshape is still needed (see the sketch after this class).
+        z = Conv3D(8, (3, 3, 3), activation='relu', padding='same')(z)
+        z = UpSampling3D((2, 2, 2))(z)
+        z = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(z)
+        z = UpSampling3D((2, 2, 2))(z)
+        z = Conv3D(64, (3, 3, 3), activation='relu')(z)
+        z = UpSampling3D((2, 2, 2))(z)
+        decoded = Conv3D(1, (3, 3, 3), activation='sigmoid', padding='same')(z)
+
+        self.Model = Model(inputs=[input1, input2], outputs=decoded)
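+
+# Sketch of the missing 2D -> 3D bridge for Model2DViewsTo3DConv (an assumption,
+# not wired into Build above): Conv3D expects a 5-D (batch, d, h, w, channels)
+# tensor, while the concatenated encoder output is a 4-D 2D feature map, so a
+# Reshape would be needed first, e.g.
+#
+#   from keras.layers import Reshape
+#   z = concatenate([encoded1, encoded2])        # (batch, h', w', 16)
+#   z = Reshape((h_prime, w_prime, 16, 1))(z)    # h_prime, w_prime are placeholders
+#   z = Conv3D(8, (3, 3, 3), activation='relu', padding='same')(z)
+#
+# where h_prime and w_prime are the pooled spatial sizes of the encoder output.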
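+
+
+# Minimal smoke test (an illustrative sketch, not part of the training pipeline):
+# it assumes DLTools.ModelWrapper can be constructed this way outside the
+# experiment driver; N_Classes is kept tiny here so the check fits in memory.
+if __name__ == "__main__":
+    import numpy as np
+
+    m = Model2DViewsTo3DDense("SmokeTest", View1Shape=(240, 256), View2Shape=(240, 256),
+                              width=8, depth=1, BatchSize=2, N_Classes=64,
+                              init='normal', Dropout=0.5, BatchNormalization=False,
+                              OutputBase="TrainedModels.Test")
+    m.Build()
+    m.Model.summary()
+
+    v1 = np.random.rand(2, 240, 256).astype('float32')
+    v2 = np.random.rand(2, 240, 256).astype('float32')
+    print m.Model.predict([v1, v2]).shape  # expected: (2, 64)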