3D Brain Tumor Segmentation and Identification in Python
from nilearn import plotting  # assumed import; nilearn is installed in a later cell
plotting.plot_stat_map('out/4.nii.gz')
<nilearn.plotting.displays.OrthoSlicer at 0x7f09bfc9e6a0>
!pip install nilearn
Collecting nilearn
  Downloading https://files.pythonhosted.org/packages/b9/c2/f5f1bdd37a3da28b3b34305e4ba27cce468db6073998d62a38abd0e281da/nilearn-0.6.2-py3-none-any.whl (2.5MB)
     |████████████████████████████████| 2.5MB 7.3MB/s
Requirement already satisfied: scikit-learn>=0.19 in /usr/local/lib/python3.6/dist-packages (from nilearn) (0.22.2.post1)
Requirement already satisfied: scipy>=0.19 in /usr/local/lib/python3.6/dist-packages (from nilearn) (1.4.1)
Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.6/dist-packages (from nilearn) (0.15.1)
Requirement already satisfied: numpy>=1.11 in /usr/local/lib/python3.6/dist-packages (from nilearn) (1.18.5)
Requirement already satisfied: sklearn in /usr/local/lib/python3.6/dist-packages (from nilearn) (0.0)
Requirement already satisfied: nibabel>=2.0.2 in /usr/local/lib/python3.6/dist-packages (from nilearn) (3.0.2)
Installing collected packages: nilearn
Successfully installed nilearn-0.6.2
!pip install SimpleITK
Collecting SimpleITK
  Downloading https://files.pythonhosted.org/packages/f8/d8/53338c34f71020725ffb3557846c80af96c29c03bc883551a2565aa68a7c/SimpleITK-1.2.4-cp36-cp36m-manylinux1_x86_64.whl (42.5MB)
     |████████████████████████████████| 42.5MB 70kB/s
Installing collected packages: SimpleITK
Successfully installed SimpleITK-1.2.4
!pwd
/content
!pip show tensorflow-gpu
Name: tensorflow-gpu
Version: 1.15.2
Summary: TensorFlow is an open source machine learning framework for everyone.
Home-page: https://www.tensorflow.org/
Author: Google Inc.
Author-email: packages@tensorflow.org
License: Apache 2.0
Location: /usr/local/lib/python3.6/dist-packages
Requires: tensorflow-estimator, wrapt, grpcio, termcolor, tensorboard, opt-einsum, numpy, protobuf, absl-py, six, wheel, keras-preprocessing, gast, google-pasta, keras-applications, astor
Required-by:
import os
import glob

# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
from main_model import model_
import keras

config = dict()
config["image_shape"] = (32, 32, 32)  # shape the images will be cropped/resampled to
config["patch_shape"] = None          # switch to None to train on the whole image
config["labels"] = (1,)               # the label numbers on the input image
config["n_base_filters"] = 16
config["n_labels"] = 1
# config["all_modalities"] = ["t1"]
config["all_modalities"] = ["t1", "t1ce", "flair", "t2"]
config["training_modalities"] = config["all_modalities"]  # change this to use only some of the modalities
config["nb_channels"] = len(config["training_modalities"])

if len(config["training_modalities"]) > 1:
    from all_data import write_data_to_file, open_data_file
else:
    from data import write_data_to_file, open_data_file

if "patch_shape" in config and config["patch_shape"] is not None:
    config["input_shape"] = tuple(list(config["patch_shape"]) + [config["nb_channels"]])
else:
    config["input_shape"] = tuple(list(config["image_shape"]) + [config["nb_channels"]])

config["truth_channel"] = config["nb_channels"]
config["deconvolution"] = True
# config["batch_size"] = 1
config["validation_batch_size"] = 1
config["n_epochs"] = 500               # cut off training after this many epochs
config["patience"] = 10                # reduce the learning rate after this many epochs without validation-loss improvement
config["early_stop"] = 50              # stop training after this many epochs without validation-loss improvement
config["initial_learning_rate"] = 5e-4
config["learning_rate_drop"] = 0.5     # factor by which the learning rate will be reduced
config["validation_split"] = 0.8       # portion of the data used for training
config["flip"] = False                 # augment the data by randomly flipping an axis during training
config["permute"] = True               # data shape must be a cube; augments the data by permuting in various directions
config["distort"] = None               # switch to None for no distortion
config["augment"] = config["flip"] or config["distort"]
config["validation_patch_overlap"] = 0           # if > 0, validation patches will overlap during training
config["training_patch_start_offset"] = (16, 16, 16)  # randomly offset the first patch index by up to this offset
config["skip_blank"] = True                      # if True, patches without any target will be skipped
config["data_file_train"] = os.path.abspath("brats_data_train.h5")
config["data_file_valid"] = os.path.abspath("brats_data_valid.h5")
config["overwrite"] = False


def fetch_training_data_files(return_subject_ids=False):
    training_data_files = list()
    subject_ids = list()
    for subject_dir in glob.glob(os.path.join(os.path.dirname(os.path.realpath('__file__')),
                                              "preprocessed", "*", "*")):
        print(subject_dir)
        subject_ids.append(os.path.basename(subject_dir))
        name_data = subject_dir.split('/')[-1]
        subject_files = list()
        for modality in config["training_modalities"] + ["seg"]:
            base_name = name_data + '_' + modality
            subject_files.append(os.path.join(subject_dir, base_name + ".nii.gz"))
        training_data_files.append(tuple(subject_files))
    if return_subject_ids:
        return training_data_files, subject_ids
    else:
        return training_data_files


def fetch_validate_data_files(return_subject_ids=False):
    training_data_files = list()
    subject_ids = list()
    for subject_dir in glob.glob(os.path.join(os.path.dirname(os.path.realpath('__file__')),
                                              "preproess_validate", "*", "*")):
        subject_ids.append(os.path.basename(subject_dir))
        name_data = subject_dir.split('/')[-1]
        subject_files = list()
        for modality in config["training_modalities"] + ["seg"]:
            base_name = name_data + '_' + modality
            subject_files.append(os.path.join(subject_dir, base_name + ".nii.gz"))
        training_data_files.append(tuple(subject_files))
    if return_subject_ids:
        return training_data_files, subject_ids
    else:
        return training_data_files
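For reference, the glob pattern above assumes one folder per subject under preprocessed/<group>/<subject>/, with one file per modality plus the segmentation, all named after the subject folder. A minimal sketch of the file list the loader expects for a single subject (the subject and group names here are made up for illustration):

import os

subject_dir = os.path.join("preprocessed", "HGG", "Brats_subject_001")  # hypothetical subject folder
subject_id = os.path.basename(subject_dir)
expected_files = [os.path.join(subject_dir, subject_id + "_" + modality + ".nii.gz")
                  for modality in ["t1", "t1ce", "flair", "t2", "seg"]]
print(expected_files)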
# overwrite = config["overwrite"]
# Convert the input images into an HDF5 file.
# if overwrite or not os.path.exists(config["data_file_train"]):
#     training_files, subject_ids = fetch_training_data_files(return_subject_ids=True)
#     write_data_to_file(training_files, config["data_file_train"],
#                        image_shape=config["image_shape"], subject_ids=subject_ids)

data_file_opened = open_data_file(config["data_file_train"])

import numpy as np

train_data = np.array(data_file_opened.root.data)
# afine_data = np.array(data_file_opened.root.affine)
train_target = np.array(data_file_opened.root.truth)
del data_file_opened
data_file_opened_test = open_data_file(config["data_file_valid"])

import numpy as np

test_data = np.array(data_file_opened_test.root.data)
test_target = np.array(data_file_opened_test.root.truth)
print(config["input_shape"])
del data_file_opened_test
(32, 32, 32, 4)
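The loaded arrays follow a channels-last layout: the data array is (n_subjects, x, y, z, n_modalities) and the truth array is (n_subjects, x, y, z, 1), so input_shape = image_shape + (number of modalities,) = (32, 32, 32, 4). A quick sanity check that only prints shapes already confirmed by the later cells and training logs:

print(train_data.shape, train_target.shape)  # e.g. (285, 32, 32, 32, 4) and (285, 32, 32, 32, 1)
print(test_data.shape, test_target.shape)    # e.g. (98, 32, 32, 32, 4) and (98, 32, 32, 32, 1)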
from unet import unet_model_3d

model = unet_model_3d(input_shape=config["input_shape"], n_labels=1)
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/resource_variable_ops.py:1630: calling BaseResourceVariable.__init__ (from tensorflow.python.ops.resource_variable_ops) with constraint is deprecated and will be removed in a future version.
Instructions for updating:
If using Keras pass *_constraint arguments to layers.
512
Model: "model_1"
__________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to
==================================================================================================
input_1 (InputLayer)             (None, 32, 32, 32, 4  0
conv3d_1 (Conv3D)                (None, 32, 32, 32, 3  3488        input_1[0][0]
activation_1 (Activation)        (None, 32, 32, 32, 3  0           conv3d_1[0][0]
conv3d_2 (Conv3D)                (None, 32, 32, 32, 6  55360       activation_1[0][0]
activation_2 (Activation)        (None, 32, 32, 32, 6  0           conv3d_2[0][0]
max_pooling3d_1 (MaxPooling3D)   (None, 16, 16, 16, 6  0           activation_2[0][0]
conv3d_3 (Conv3D)                (None, 16, 16, 16, 6  110656      max_pooling3d_1[0][0]
activation_3 (Activation)        (None, 16, 16, 16, 6  0           conv3d_3[0][0]
conv3d_4 (Conv3D)                (None, 16, 16, 16, 1  221312      activation_3[0][0]
activation_4 (Activation)        (None, 16, 16, 16, 1  0           conv3d_4[0][0]
max_pooling3d_2 (MaxPooling3D)   (None, 8, 8, 8, 128)  0           activation_4[0][0]
conv3d_5 (Conv3D)                (None, 8, 8, 8, 128)  442496      max_pooling3d_2[0][0]
activation_5 (Activation)        (None, 8, 8, 8, 128)  0           conv3d_5[0][0]
conv3d_6 (Conv3D)                (None, 8, 8, 8, 256)  884992      activation_5[0][0]
activation_6 (Activation)        (None, 8, 8, 8, 256)  0           conv3d_6[0][0]
max_pooling3d_3 (MaxPooling3D)   (None, 4, 4, 4, 256)  0           activation_6[0][0]
conv3d_7 (Conv3D)                (None, 4, 4, 4, 256)  1769728     max_pooling3d_3[0][0]
activation_7 (Activation)        (None, 4, 4, 4, 256)  0           conv3d_7[0][0]
conv3d_8 (Conv3D)                (None, 4, 4, 4, 512)  3539456     activation_7[0][0]
activation_8 (Activation)        (None, 4, 4, 4, 512)  0           conv3d_8[0][0]
up_sampling3d_1 (UpSampling3D)   (None, 8, 8, 8, 512)  0           activation_8[0][0]
concatenate_1 (Concatenate)      (None, 8, 8, 8, 768)  0           up_sampling3d_1[0][0], activation_6[0][0]
conv3d_9 (Conv3D)                (None, 8, 8, 8, 256)  5308672     concatenate_1[0][0]
activation_9 (Activation)        (None, 8, 8, 8, 256)  0           conv3d_9[0][0]
conv3d_10 (Conv3D)               (None, 8, 8, 8, 256)  1769728     activation_9[0][0]
activation_10 (Activation)       (None, 8, 8, 8, 256)  0           conv3d_10[0][0]
up_sampling3d_2 (UpSampling3D)   (None, 16, 16, 16, 2  0           activation_10[0][0]
concatenate_2 (Concatenate)      (None, 16, 16, 16, 3  0           up_sampling3d_2[0][0], activation_4[0][0]
conv3d_11 (Conv3D)               (None, 16, 16, 16, 1  1327232     concatenate_2[0][0]
activation_11 (Activation)       (None, 16, 16, 16, 1  0           conv3d_11[0][0]
conv3d_12 (Conv3D)               (None, 16, 16, 16, 1  442496      activation_11[0][0]
activation_12 (Activation)       (None, 16, 16, 16, 1  0           conv3d_12[0][0]
up_sampling3d_3 (UpSampling3D)   (None, 32, 32, 32, 1  0           activation_12[0][0]
concatenate_3 (Concatenate)      (None, 32, 32, 32, 1  0           up_sampling3d_3[0][0], activation_2[0][0]
conv3d_13 (Conv3D)               (None, 32, 32, 32, 6  331840      concatenate_3[0][0]
activation_13 (Activation)       (None, 32, 32, 32, 6  0           conv3d_13[0][0]
conv3d_14 (Conv3D)               (None, 32, 32, 32, 6  110656      activation_13[0][0]
activation_14 (Activation)       (None, 32, 32, 32, 6  0           conv3d_14[0][0]
conv3d_15 (Conv3D)               (None, 32, 32, 32, 1  65          activation_14[0][0]
activation_15 (Activation)       (None, 32, 32, 32, 1  0           conv3d_15[0][0]
==================================================================================================
Total params: 16,318,177
Trainable params: 16,318,177
Non-trainable params: 0
__________________________________________________________________________________________________
None
from main_model import model_
from metrics import weighted_dice_coefficient_loss
from keras.optimizers import Adam

# model = model_(input_shape=config["input_shape"], depth=3, n_labels=1,
#                initial_learning_rate=0.00001, n_base_filters=4)
# model.compile(optimizer=Adam(lr=0.1), loss=weighted_dice_coefficient_loss)
# loss="binary_crossentropy", metrics=["accuracy"]
# print(model.summary())

from unet import unet_model_3d

model = unet_model_3d(input_shape=config["input_shape"], n_labels=1)

# Halve the learning rate whenever the validation loss plateaus for 10 epochs.
learning_rate_reduction = keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                            patience=10,
                                                            verbose=1,
                                                            factor=0.5,
                                                            min_lr=0.000001)

# Note: `nb_epoch` is the deprecated Keras 1.x name for `epochs` (see the warning below).
history = model.fit(train_data, train_target,
                    validation_data=(test_data, test_target),
                    batch_size=1, verbose=1,
                    initial_epoch=1, nb_epoch=10,
                    callbacks=[learning_rate_reduction])
# model.save('trained_big.h5')
512
Model: "model_2"
__________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to
==================================================================================================
input_2 (InputLayer)             (None, 32, 32, 32, 4  0
conv3d_16 (Conv3D)               (None, 32, 32, 32, 3  3488        input_2[0][0]
activation_16 (Activation)       (None, 32, 32, 32, 3  0           conv3d_16[0][0]
conv3d_17 (Conv3D)               (None, 32, 32, 32, 6  55360       activation_16[0][0]
activation_17 (Activation)       (None, 32, 32, 32, 6  0           conv3d_17[0][0]
max_pooling3d_4 (MaxPooling3D)   (None, 16, 16, 16, 6  0           activation_17[0][0]
conv3d_18 (Conv3D)               (None, 16, 16, 16, 6  110656      max_pooling3d_4[0][0]
activation_18 (Activation)       (None, 16, 16, 16, 6  0           conv3d_18[0][0]
conv3d_19 (Conv3D)               (None, 16, 16, 16, 1  221312      activation_18[0][0]
activation_19 (Activation)       (None, 16, 16, 16, 1  0           conv3d_19[0][0]
max_pooling3d_5 (MaxPooling3D)   (None, 8, 8, 8, 128)  0           activation_19[0][0]
conv3d_20 (Conv3D)               (None, 8, 8, 8, 128)  442496      max_pooling3d_5[0][0]
activation_20 (Activation)       (None, 8, 8, 8, 128)  0           conv3d_20[0][0]
conv3d_21 (Conv3D)               (None, 8, 8, 8, 256)  884992      activation_20[0][0]
activation_21 (Activation)       (None, 8, 8, 8, 256)  0           conv3d_21[0][0]
max_pooling3d_6 (MaxPooling3D)   (None, 4, 4, 4, 256)  0           activation_21[0][0]
conv3d_22 (Conv3D)               (None, 4, 4, 4, 256)  1769728     max_pooling3d_6[0][0]
activation_22 (Activation)       (None, 4, 4, 4, 256)  0           conv3d_22[0][0]
conv3d_23 (Conv3D)               (None, 4, 4, 4, 512)  3539456     activation_22[0][0]
activation_23 (Activation)       (None, 4, 4, 4, 512)  0           conv3d_23[0][0]
up_sampling3d_4 (UpSampling3D)   (None, 8, 8, 8, 512)  0           activation_23[0][0]
concatenate_4 (Concatenate)      (None, 8, 8, 8, 768)  0           up_sampling3d_4[0][0], activation_21[0][0]
conv3d_24 (Conv3D)               (None, 8, 8, 8, 256)  5308672     concatenate_4[0][0]
activation_24 (Activation)       (None, 8, 8, 8, 256)  0           conv3d_24[0][0]
conv3d_25 (Conv3D)               (None, 8, 8, 8, 256)  1769728     activation_24[0][0]
activation_25 (Activation)       (None, 8, 8, 8, 256)  0           conv3d_25[0][0]
up_sampling3d_5 (UpSampling3D)   (None, 16, 16, 16, 2  0           activation_25[0][0]
concatenate_5 (Concatenate)      (None, 16, 16, 16, 3  0           up_sampling3d_5[0][0], activation_19[0][0]
conv3d_26 (Conv3D)               (None, 16, 16, 16, 1  1327232     concatenate_5[0][0]
activation_26 (Activation)       (None, 16, 16, 16, 1  0           conv3d_26[0][0]
conv3d_27 (Conv3D)               (None, 16, 16, 16, 1  442496      activation_26[0][0]
activation_27 (Activation)       (None, 16, 16, 16, 1  0           conv3d_27[0][0]
up_sampling3d_6 (UpSampling3D)   (None, 32, 32, 32, 1  0           activation_27[0][0]
concatenate_6 (Concatenate)      (None, 32, 32, 32, 1  0           up_sampling3d_6[0][0], activation_17[0][0]
conv3d_28 (Conv3D)               (None, 32, 32, 32, 6  331840      concatenate_6[0][0]
activation_28 (Activation)       (None, 32, 32, 32, 6  0           conv3d_28[0][0]
conv3d_29 (Conv3D)               (None, 32, 32, 32, 6  110656      activation_28[0][0]
activation_29 (Activation)       (None, 32, 32, 32, 6  0           conv3d_29[0][0]
conv3d_30 (Conv3D)               (None, 32, 32, 32, 1  65          activation_29[0][0]
activation_30 (Activation)       (None, 32, 32, 32, 1  0           conv3d_30[0][0]
==================================================================================================
Total params: 16,318,177
Trainable params: 16,318,177
Non-trainable params: 0
__________________________________________________________________________________________________
None
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:23: UserWarning: The `nb_epoch` argument in `fit` has been renamed `epochs`.
WARNING:tensorflow:From /usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py:422: The name tf.global_variables is deprecated. Please use tf.compat.v1.global_variables instead.

Train on 285 samples, validate on 98 samples
Epoch 2/10
285/285 [==============================] - 89s 312ms/step - loss: -0.2476 - dice_coefficient: 0.2476 - val_loss: -0.3466 - val_dice_coefficient: 0.3466
Epoch 3/10
285/285 [==============================] - 82s 287ms/step - loss: -0.3296 - dice_coefficient: 0.3296 - val_loss: -0.3569 - val_dice_coefficient: 0.3569
Epoch 4/10
285/285 [==============================] - 82s 287ms/step - loss: -0.3443 - dice_coefficient: 0.3443 - val_loss: -0.3861 - val_dice_coefficient: 0.3861
Epoch 5/10
285/285 [==============================] - 81s 286ms/step - loss: -0.3598 - dice_coefficient: 0.3598 - val_loss: -0.4107 - val_dice_coefficient: 0.4107
Epoch 6/10
285/285 [==============================] - 81s 286ms/step - loss: -0.4070 - dice_coefficient: 0.4070 - val_loss: -0.5036 - val_dice_coefficient: 0.5036
Epoch 7/10
285/285 [==============================] - 81s 285ms/step - loss: -0.5312 - dice_coefficient: 0.5312 - val_loss: -0.6344 - val_dice_coefficient: 0.6344
Epoch 8/10
285/285 [==============================] - 81s 284ms/step - loss: -0.6028 - dice_coefficient: 0.6028 - val_loss: -0.6938 - val_dice_coefficient: 0.6938
Epoch 9/10
285/285 [==============================] - 81s 284ms/step - loss: -0.6598 - dice_coefficient: 0.6598 - val_loss: -0.6725 - val_dice_coefficient: 0.6725
Epoch 10/10
285/285 [==============================] - 81s 283ms/step - loss: -0.7233 - dice_coefficient: 0.7233 - val_loss: -0.7054 - val_dice_coefficient: 0.7054
model.evaluate(test_data,test_target)
98/98 [==============================] - 11s 113ms/step
[-0.753577535249749, 0.8017180562019348]
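model.evaluate returns [loss, dice_coefficient], so the two numbers above are the loss (about -0.75) and the mean Dice coefficient (about 0.80) over the 98 validation volumes. The loss is negative because the network minimises the negative soft Dice coefficient, which is why the loss and the dice_coefficient metric mirror each other in the training log. A minimal sketch of such a loss in the Keras backend, assuming the project's metrics module implements something along these lines (this is a hypothetical re-implementation, not the repository's exact code):

from keras import backend as K

def dice_coefficient(y_true, y_pred, smooth=1.0):
    # soft Dice over the flattened volumes
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def dice_coefficient_loss(y_true, y_pred):
    # minimising the negative Dice maximises overlap, hence the negative loss values above
    return -dice_coefficient(y_true, y_pred)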
# history = model.fit(train_data, train_target, validation_data=(test_data, test_target),
#                     batch_size=1, verbose=1, initial_epoch=10, nb_epoch=50,
#                     callbacks=[learning_rate_reduction])
Train on 285 samples, validate on 98 samples
Epoch 11/50
  1/285 [..............................] - ETA: 38s - loss: -0.3105
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:1: UserWarning: The `nb_epoch` argument in `fit` has been renamed `epochs`.
  """Entry point for launching an IPython kernel.
285/285 [==============================] - 21s 75ms/step - loss: -0.3671 - val_loss: -0.2720
Epoch 12/50
285/285 [==============================] - 22s 76ms/step - loss: -0.3743 - val_loss: -0.2380
Epoch 13/50
285/285 [==============================] - 21s 75ms/step - loss: -0.3748 - val_loss: -0.1618
Epoch 14/50
285/285 [==============================] - 21s 75ms/step - loss: -0.3805 - val_loss: -0.2399
Epoch 15/50
285/285 [==============================] - 21s 75ms/step - loss: -0.3875 - val_loss: -0.1963
Epoch 16/50
285/285 [==============================] - 21s 75ms/step - loss: -0.3833 - val_loss: -0.1810
Epoch 17/50
285/285 [==============================] - 21s 75ms/step - loss: -0.3952 - val_loss: -0.1649
Epoch 18/50
285/285 [==============================] - 21s 75ms/step - loss: -0.3990 - val_loss: -0.1757
Epoch 19/50
285/285 [==============================] - 21s 75ms/step - loss: -0.3999 - val_loss: -0.1627
Epoch 20/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4045 - val_loss: -0.2240
Epoch 21/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4072 - val_loss: -0.1508
Epoch 00021: ReduceLROnPlateau reducing learning rate to 0.05000000074505806.
Epoch 22/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4212 - val_loss: -0.2022
Epoch 23/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4282 - val_loss: -0.1755
Epoch 24/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4294 - val_loss: -0.1609
Epoch 25/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4351 - val_loss: -0.1766
Epoch 26/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4373 - val_loss: -0.1744
Epoch 27/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4375 - val_loss: -0.1598
Epoch 28/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4428 - val_loss: -0.1651
Epoch 29/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4446 - val_loss: -0.1542
Epoch 30/50
285/285 [==============================] - 21s 75ms/step - loss: -0.4511 - val_loss: -0.1899
Epoch 31/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5053 - val_loss: -0.1737
Epoch 00031: ReduceLROnPlateau reducing learning rate to 0.02500000037252903.
Epoch 32/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5425 - val_loss: -0.1700
Epoch 33/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5483 - val_loss: -0.1592
Epoch 34/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5633 - val_loss: -0.1618
Epoch 35/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5580 - val_loss: -0.1673
Epoch 36/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5702 - val_loss: -0.1743
Epoch 37/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5727 - val_loss: -0.1746
Epoch 38/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5762 - val_loss: -0.1982
Epoch 39/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5862 - val_loss: -0.1746
Epoch 40/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5938 - val_loss: -0.1728
Epoch 41/50
285/285 [==============================] - 21s 75ms/step - loss: -0.5935 - val_loss: -0.1590
Epoch 00041: ReduceLROnPlateau reducing learning rate to 0.012500000186264515.
Epoch 42/50
285/285 [==============================] - 21s 74ms/step - loss: -0.6060 - val_loss: -0.1616
Epoch 43/50
285/285 [==============================] - 21s 75ms/step - loss: -0.6131 - val_loss: -0.1680
Epoch 44/50
285/285 [==============================] - 21s 75ms/step - loss: -0.6143 - val_loss: -0.1605
Epoch 45/50
285/285 [==============================] - 21s 75ms/step - loss: -0.6227 - val_loss: -0.1719
Epoch 46/50
285/285 [==============================] - 21s 75ms/step - loss: -0.6228 - val_loss: -0.1675
Epoch 47/50
285/285 [==============================] - 21s 75ms/step - loss: -0.6266 - val_loss: -0.1652
Epoch 48/50
285/285 [==============================] - 21s 75ms/step - loss: -0.6276 - val_loss: -0.1752
Epoch 49/50
285/285 [==============================] - 21s 75ms/step - loss: -0.6259 - val_loss: -0.1678
Epoch 50/50
285/285 [==============================] - 21s 75ms/step - loss: -0.6308 - val_loss: -0.1855
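The config at the top defines both patience and early_stop, but only the ReduceLROnPlateau callback is actually passed to fit in the training cells above. A short sketch of wiring early stopping in as well, reusing those config values (this is an addition for illustration, not something the runs above used):

from keras.callbacks import EarlyStopping, ReduceLROnPlateau

callbacks = [
    ReduceLROnPlateau(monitor='val_loss', factor=config["learning_rate_drop"],
                      patience=config["patience"], verbose=1, min_lr=1e-6),
    EarlyStopping(monitor='val_loss', patience=config["early_stop"], verbose=1),
]
# history = model.fit(..., callbacks=callbacks)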
import matplotlib.pyplot as plt

def plot_model_history(model_history):
    """Plot training/validation loss and Dice coefficient per epoch."""
    fig, axs = plt.subplots(1, 2, figsize=(15, 5))
    epochs = range(1, len(model_history.history['loss']) + 1)
    # summarize history for loss
    axs[0].plot(epochs, model_history.history['loss'])
    axs[0].plot(epochs, model_history.history['val_loss'])
    axs[0].set_title('Model Loss')
    axs[0].set_ylabel('Loss')
    axs[0].set_xlabel('Epoch')
    axs[0].set_xticks(np.arange(1, len(model_history.history['loss']) + 1))
    axs[0].legend(['train', 'val'], loc='best')
    # summarize history for the Dice coefficient
    axs[1].plot(epochs, model_history.history['dice_coefficient'])
    axs[1].plot(epochs, model_history.history['val_dice_coefficient'])
    axs[1].set_title('Model Dice Coefficient')
    axs[1].set_ylabel('Dice Coefficient')
    axs[1].set_xlabel('Epoch')
    axs[1].set_xticks(np.arange(1, len(model_history.history['loss']) + 1))
    axs[1].legend(['train Dice Coefficient', 'val Dice Coefficient'], loc='best')
    plt.show()

plot_model_history(history)
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:12: MatplotlibDeprecationWarning: Passing the minor parameter of set_xticks() positionally is deprecated since Matplotlib 3.2; the parameter will become keyword-only two minor releases later.
  if sys.path[0] == '':
/usr/local/lib/python3.6/dist-packages/ipykernel_launcher.py:20: MatplotlibDeprecationWarning: Passing the minor parameter of set_xticks() positionally is deprecated since Matplotlib 3.2; the parameter will become keyword-only two minor releases later.
# Predict on a single validation volume and pull out the 3D maps for comparison.
sample_test = test_data[0]
sample_test.shape
resample_data = sample_test[np.newaxis]  # add a batch axis: (1, 32, 32, 32, 4)
resample_data.shape
response = model.predict(resample_data)
out_resp = response[0]
out_l = out_resp[:, :, :, 0] * 4  # drop the channel axis; *4 rescales the map (presumably to match a BraTS label value for display)
real = test_target[0][:, :, :, 0]
test_data.shape
(98, 32, 32, 32, 4)
sample_test=test_data[0:9]
sample_test.shape
(9, 32, 32, 32, 4)
type(sample_test)
numpy.ndarray
resample_data = sample_test[np.newaxis]  # adds a leading batch axis; the shape below reflects an earlier run on a single volume
resample_data.shape
(1, 32, 32, 32, 4)
response=model.predict(sample_test)
response.shape
(9, 32, 32, 32, 1)
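The final layer has a single output channel with (assumed) sigmoid activation, so each voxel of response holds a tumour probability in [0, 1]. A minimal way to turn one of these predictions into a binary mask (the 0.5 threshold is an assumption, not something fixed by the notebook):

pred_mask = (response[4, :, :, :, 0] > 0.5).astype(np.uint8)
pred_mask.shape  # (32, 32, 32)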
out_resp=response[4]
out_resp.shape
(32, 32, 32, 1)
out_l=out_resp[:,:,:,0]
out_l.shape
(32, 32, 32)
test_target.shape
(98, 32, 32, 32, 1)
real = test_target[4]
real.shape
(32, 32, 32, 1)
real_l=real[:,:,:,0]
real_l.shape
(32, 32, 32)
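With both the predicted map and the ground truth reduced to (32, 32, 32) volumes, a quick NumPy Dice score gives a per-subject counterpart to the Keras metric (thresholding at 0.5 is again an assumption):

pred_bin = (out_l > 0.5).astype(np.float32)
dice = 2.0 * np.sum(pred_bin * real_l) / (np.sum(pred_bin) + np.sum(real_l) + 1e-7)
print(dice)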
import nibabel
import numpy as np
import random
import os
import SimpleITK as sitk
import pickle
from scipy import ndimage


def save_to_nii(im, filename, outdir="", mode="image", system="sitk"):
    """Save a 3D numpy array to ./<outdir>/<filename>.nii.gz using SimpleITK or nibabel."""
    if system == "sitk":
        if mode == 'label':
            img = sitk.GetImageFromArray(im.astype(np.uint8))
        else:
            img = sitk.GetImageFromArray(im.astype(np.float32))
        if not os.path.exists("./{}".format(outdir)):
            os.mkdir("./{}".format(outdir))
        sitk.WriteImage(img, "./{}/{}.nii.gz".format(outdir, filename))
    else:
        img = np.rot90(im, k=2, axes=(1, 2))
        OUTPUT_AFFINE = np.array(
            [[0, 0, 1, 0],
             [0, 1, 0, 0],
             [1, 0, 0, 0],
             [0, 0, 0, 1]])
        if mode == 'label':
            img = nibabel.Nifti1Image(img.astype(np.uint8), OUTPUT_AFFINE)
        else:
            img = nibabel.Nifti1Image(img.astype(np.float32), OUTPUT_AFFINE)
        if not os.path.exists("./{}".format(outdir)):
            os.mkdir("./{}".format(outdir))
        nibabel.save(img, "./{}/{}.nii.gz".format(outdir, filename))
# Predict on another validation volume (index 7) and extract the 3D prediction and ground truth.
sample_test2 = test_data[7]
resample_data2 = sample_test2[np.newaxis]
response2 = model.predict(resample_data2)
out_resp2 = response2[0]
out_l2 = out_resp2[:, :, :, 0]
real2 = test_target[7][:, :, :, 0]
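The plotting cells (out/3.nii.gz below, and out/4.nii.gz at the top of the notebook) visualise NIfTI files under out/. The call that wrote those files is not shown, but they could have been produced with the save_to_nii helper defined above, roughly as follows (the file names and mode arguments are guesses):

save_to_nii(out_l2, "3", outdir="out", mode="image")  # predicted probability map
save_to_nii(real2, "4", outdir="out", mode="label")   # ground-truth mask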
plotting.plot_stat_map('out/3.nii.gz')
<nilearn.plotting.displays.OrthoSlicer at 0x7f09c22acb38>