Contents

Last modified: 2021-11-04 15:18:10
Compiled: 2021-11-04 15:19:04

1 Getting started

1.1 Load packages

library(ExperimentHub)
library(rMiW)
library(EBImage)
library(keras)

packageVersion("keras")
## [1] '2.6.1'
packageVersion("tensorflow")
## [1] '2.6.0'

1.2 Optional: Update (ver 3.14)

BiocManager::install(version = "3.14")

1.3 Optional: Python environment on R

#On MacOSX
library(reticulate)
#reticulate::install_miniconda(force = T)
#reticulate::use_python("~/Library/r-miniconda/envs/r-reticulate/bin/python")
reticulate::py_config()

#install pydot
#reticulate::py_install("pydot")

#On CentOS
library(reticulate)
#reticulate::install_miniconda(force = T)
#reticulate::use_python("~/.local/share/r-miniconda/envs/r-reticulate/bin/python")
reticulate::py_config()

1.4 Optional: install python packages for R keras / tensorflow (ver 3.14)

#For CPU
keras::install_keras()

1.5 Optional: Removes all files in the cache directory

install.packages("R.cache")
#system("open ~/Library/Caches/org.R-project.R/R/")
R.cache::clearCache("~/Library/Caches/org.R-project.R/R/ExperimentHub")

1.6 Optional: extract R script from the Rmd file

knitr::purl("./rMiW/vignettes/rMiW_02_BioImageDbs.Rmd", output="./rMiW/vignettes/rMiW_02_BioImageDbs.R")

2 Obtain 2D image dataset via BioImageDbs

For details about the BioImageDbs package, please check the “Providing Bioimage Dataset for ExperimentHub” document.

Please check the metadata (CSV) of BioImageDbs in GitHub.

#Description: Providing Bioimage Dataset for ExperimentHub
browseURL("https://bioconductor.org/packages/release/data/experiment/vignettes/BioImageDbs/inst/doc/BioImageDbs.html")

#Metadata (CSV) for BioImageDbs
browseURL("https://github.com/kumeS/BioImageDbs/blob/main/inst/extdata/v02/metadata_v02.csv")

2.1 Search query for the BioImageDbs

Via the ExperimentHub function, we can obtain the supervised image data as a list of R arrays and their metadata.

Here shows an example of a search query for the BioImageDbs (Currently, snapshotDate(): 2021-10-18 for version 3.14).

#Set the ExperimentHub function
eh <- ExperimentHub::ExperimentHub()
## snapshotDate(): 2021-10-18
#All entities of BioImageDbs
AnnotationHub::query(eh, c("BioImageDbs"))
## ExperimentHub with 96 records
## # snapshotDate(): 2021-10-18
## # $dataprovider: Satoshi Kume <satoshi.kume.1984@gmail.com>, CELL TRACKING C...
## # $species: Mus musculus, Homo sapiens, Rattus norvegicus, Drosophila melano...
## # $rdataclass: List, magick-image
## # additional mcols(): taxonomyid, genome, description,
## #   coordinate_1_based, maintainer, rdatadateadded, preparerclass, tags,
## #   rdatapath, sourceurl, sourcetype 
## # retrieve records with, e.g., 'object[["EH6851"]]' 
## 
##            title                                                            
##   EH6851 | EM_id0001_Brain_CA1_hippocampus_region_5dTensor.rds              
##   EH6852 | EM_id0001_Brain_CA1_hippocampus_region_5dTensor_train_dataset.gif
##   EH6853 | EM_id0002_Drosophila_brain_region_5dTensor.rds                   
##   EH6854 | EM_id0002_Drosophila_brain_region_5dTensor_train_dataset.gif     
##   EH6855 | LM_id0001_DIC_C2DH_HeLa_4dTensor.rds                             
##   ...      ...                                                              
##   EH6942 | EM_id0009_MurineBMMC_All_512_4dTensor_dataset.gif                
##   EH6943 | EM_id0010_HumanBlast_All_512_4dTensor.Rds                        
##   EH6944 | EM_id0010_HumanBlast_All_512_4dTensor_dataset.gif                
##   EH6945 | EM_id0011_HumanJurkat_All_512_4dTensor.Rds                       
##   EH6946 | EM_id0011_HumanJurkat_All_512_4dTensor_dataset.gif
#Query with LM_id0001 (Light Microscopy ID 0001)
AnnotationHub::query(eh, c("BioImageDbs", "LM_id0001"))
## ExperimentHub with 10 records
## # snapshotDate(): 2021-10-18
## # $dataprovider: CELL TRACKING CHALLENGE (http://celltrackingchallenge.net/2...
## # $species: Homo sapiens
## # $rdataclass: List, magick-image
## # additional mcols(): taxonomyid, genome, description,
## #   coordinate_1_based, maintainer, rdatadateadded, preparerclass, tags,
## #   rdatapath, sourceurl, sourcetype 
## # retrieve records with, e.g., 'object[["EH6855"]]' 
## 
##            title                                                    
##   EH6855 | LM_id0001_DIC_C2DH_HeLa_4dTensor.rds                     
##   EH6856 | LM_id0001_DIC_C2DH_HeLa_4dTensor_train_dataset.gif       
##   EH6857 | LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.rds              
##   EH6858 | LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif
##   EH6859 | LM_id0001_DIC_C2DH_HeLa_5dTensor.rds                     
##   EH6878 | LM_id0001_DIC_C2DH_HeLa_4dTensor.Rds                     
##   EH6879 | LM_id0001_DIC_C2DH_HeLa_4dTensor_train_dataset.gif       
##   EH6880 | LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.Rds              
##   EH6881 | LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif
##   EH6882 | LM_id0001_DIC_C2DH_HeLa_5dTensor.Rds
#check 4d tensor of LM_id0001
(qr <- AnnotationHub::query(eh, c("BioImageDbs", "LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary")))
## ExperimentHub with 4 records
## # snapshotDate(): 2021-10-18
## # $dataprovider: CELL TRACKING CHALLENGE (http://celltrackingchallenge.net/2...
## # $species: Homo sapiens
## # $rdataclass: magick-image, List
## # additional mcols(): taxonomyid, genome, description,
## #   coordinate_1_based, maintainer, rdatadateadded, preparerclass, tags,
## #   rdatapath, sourceurl, sourcetype 
## # retrieve records with, e.g., 'object[["EH6857"]]' 
## 
##            title                                                    
##   EH6857 | LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.rds              
##   EH6858 | LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif
##   EH6880 | LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.Rds              
##   EH6881 | LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif
#Select their metadata using `qr$`
#show title
qr$title
## [1] "LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.rds"              
## [2] "LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif"
## [3] "LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.Rds"              
## [4] "LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif"
#show description
qr$description[3]
## [1] "4D arrays with the binary labels for the image segmentation. Human HeLa cells on a flat glass. The original dataset for 2D segmentation was provided by Dr. G. van Cappellen, Erasmus Medical Center, Rotterdam, The Netherlands. The dataset images were captured by the Zeiss LSM 510 Meta with the Objective lens, Plan-Apochromat 63x/1.4 (oil). The captured region was 0.19 x 0.19 microns, and the pixel size was 512x512 pixels per image. The time step was 10 min. To read them as the Rds file, the original tif images were converted to the PNG images with 8-bit/16-bit grey scale using ImageJ/Fiji."

Note: the small .rds datasets do not work; they will be removed in a future release.

2.2 Optional: Download from Google Drive

# Locate the shell helper bundled with rMiW, source it in a shell to
# download the shared dataset from Google Drive, then load the .Rds file.
script_path <- system.file("script", "gdrive_download.sh", package = "rMiW")
download_cmd <- paste0("source ", script_path,
                       " ; gdrive_download 1J-wR0icTCpFgeKPP0iF4cyzD-b1m3tOO ./output.Rds")
system(download_cmd)
ImgData <- readRDS("output.Rds")
str(ImgData)

https://drive.google.com/file/d/1J-wR0icTCpFgeKPP0iF4cyzD-b1m3tOO/view?usp=sharing

2.3 Acquire the image arrays

We use [] to access its metadata while [[]] to get its data instance.

We could load from cache (~/Library/Caches/org.R-project.R/R/) once the data was downloaded.

#Access metadata
qr[3]
## ExperimentHub with 1 record
## # snapshotDate(): 2021-10-18
## # names(): EH6880
## # package(): BioImageDbs
## # $dataprovider: CELL TRACKING CHALLENGE (http://celltrackingchallenge.net/2...
## # $species: Homo sapiens
## # $rdataclass: List
## # $rdatadateadded: 2021-05-18
## # $title: LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.Rds
## # $description: 4D arrays with the binary labels for the image segmentation....
## # $taxonomyid: 9606
## # $genome: NA
## # $sourcetype: PNG
## # $sourceurl: https://github.com/kumeS/BioImageDbs
## # $sourcesize: NA
## # $tags: c("bioimage", "cell tracking", "CellCulture", "microscope",
## #   "segmentation", "Tissue") 
## # retrieve record with 'object[["EH6880"]]'
#Show metadata 
qr[3]$title
## [1] "LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.Rds"
qr[3]$description
## [1] "4D arrays with the binary labels for the image segmentation. Human HeLa cells on a flat glass. The original dataset for 2D segmentation was provided by Dr. G. van Cappellen, Erasmus Medical Center, Rotterdam, The Netherlands. The dataset images were captured by the Zeiss LSM 510 Meta with the Objective lens, Plan-Apochromat 63x/1.4 (oil). The captured region was 0.19 x 0.19 microns, and the pixel size was 512x512 pixels per image. The time step was 10 min. To read them as the Rds file, the original tif images were converted to the PNG images with 8-bit/16-bit grey scale using ImageJ/Fiji."
#Download the dataset of LM_id0001 (LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.rds)
ImgData <- qr[[3]]
## see ?BioImageDbs and browseVignettes('BioImageDbs') for documentation
## loading from cache
str(ImgData)
## List of 2
##  $ Train:List of 2
##   ..$ Train_Original          : num [1:84, 1:512, 1:512, 1] 0.518 0.455 0.455 0.447 0.439 ...
##   ..$ Train_GroundTruth_Binary: num [1:84, 1:512, 1:512, 1] 0 0 0 0 0 0 0 0 0 0 ...
##  $ Test :List of 2
##   ..$ Test_Original          : num [1:84, 1:512, 1:512, 1] 0.604 0.467 0.459 0.435 0.408 ...
##   ..$ Test_GroundTruth_Binary: num [1:84, 1:512, 1:512, 1] 0 1 1 1 1 1 1 0 0 0 ...
#List of 2
# $ Train:List of 2
#  ..$ Train_Original          : num [1:84, 1:512, 1:512, 1] 0.518 0.455 0.455 0.447 0.439 ...
#  ..$ Train_GroundTruth_Binary: num [1:84, 1:512, 1:512, 1] 0 0 0 0 0 0 0 0 0 0 ...
# $ Test :List of 2
#  ..$ Test_Original          : num [1:84, 1:512, 1:512, 1] 0.604 0.467 0.459 0.435 0.408 ...
#  ..$ Test_GroundTruth_Binary: num [1:84, 1:512, 1:512, 1] 0 1 1 1 1 1 1 0 0 0 ...

#show an image
EBImage::display(EBImage::Image(ImgData$Train$Train_Original[1,,,]),
                 method="raster")

LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.Rds is a list of 4D arrays with the binary labels for the image segmentation of Human HeLa cells on a flat glass.

2.4 Show the gif animation

Here we will get a gif animation and check the result of data visualization.

#Access metadata
qr[2]
## ExperimentHub with 1 record
## # snapshotDate(): 2021-10-18
## # names(): EH6858
## # package(): BioImageDbs
## # $dataprovider: CELL TRACKING CHALLENGE (http://celltrackingchallenge.net/2...
## # $species: Homo sapiens
## # $rdataclass: magick-image
## # $rdatadateadded: 2021-05-18
## # $title: LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif
## # $description: A animation file (.gif) of the train dataset of LM_id0001_DI...
## # $taxonomyid: 9606
## # $genome: NA
## # $sourcetype: PNG
## # $sourceurl: https://github.com/kumeS/BioImageDbs
## # $sourcesize: NA
## # $tags: c("animation", "bioimage", "cell tracking", "CellCulture",
## #   "microscope", "segmentation", "Tissue") 
## # retrieve record with 'object[["EH6858"]]'
#show metadata
qr[2]$title
## [1] "LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif"
qr[2]$description
## [1] "A animation file (.gif) of the train dataset of LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.rds."
#Get gif animation
GifData <- qr[[2]]
## see ?BioImageDbs and browseVignettes('BioImageDbs') for documentation
## loading from cache
str(GifData)  # Data path
##  Named chr "/Users/skume/Library/Caches/org.R-project.R/R/ExperimentHub/8ac541541a28_6907"
##  - attr(*, "names")= chr "EH6858"
magick::image_read(GifData)

LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary_train_dataset.gif is an animation file (.gif) of the train dataset of LM_id0001_DIC_C2DH_HeLa_4dTensor_Binary.rds

Currently, only magick::image_read is supported to view gif animation files.

3 Image Segmentation for cell division images with two class / binary class

3.1 Check dimensions of images

We will use dimensions of images (Width, Height, Channel(Gray)) for the model construction.

#Dimensions of ImgData
#Image number, Width, Height, Channel(Gray)
str(ImgData)
dim(ImgData$Train$Train_Original)

#Use Width, Height, Channel(Gray)
ImgShape <- dim(ImgData$Train$Train_Original)[-1]
ImgShape
#[1] 512 512   1

3.2 Create an U-NET-based model

We will make the U-Net model with dropout layers.

model <- rMiW::unet2D_v01(shape = ImgShape)
model

3.3 Visualize the model

Here visualizes the U-NET network.

rMiW::plot_model(model=model)

#OR
#use plot_model in tensorflow
rMiW::Py_plot_model(model=model)
EBImage::display(EBImage::readImage("Model.png"))
#Alternatively, perform this if do not work above.
source("https://gist.githubusercontent.com/kumeS/41fed511efb45bd55d468d4968b0f157/raw/b7205c6285422e5166f70b770e1e8674d65f5ea2/DL_plot_modi_v1.2.R")
plot_model_modi(model=model)

4 Set compile parameters for the model

4.1 Compile the model

Here we will choose the optimizer and loss function.

# Configure the model for training: RMSprop optimizer (learning rate 0.01),
# combined BCE + Dice loss, and the Dice coefficient as the tracked metric.
# Equivalent to the piped form: `model %>% keras::compile(...)`.
model <- keras::compile(
  model,
  optimizer = keras::optimizer_rmsprop(learning_rate = 0.01),
  loss = rMiW::bce_dice_loss,
  metrics = rMiW::dice_coef
)
  • Parameters:
    • optimizer: optimizer instance / (最適化アルゴリズム)
      • learning_rate: Learning rate (float >= 0) / 学習率
    • loss: Objective function / Loss function / 損失関数 (評価指標)
    • metrics: Evaluated function / 評価関数

Check the reference sheet in keras.rstudio.com:

4.2 Fit the model using 20 images for training

We should use drop=F to avoid any change of array shape.

# Build the training tensors: the first 20 images and their binary
# ground-truth masks from the 4D arrays (image, width, height, channel).
# drop=FALSE preserves the 4D shape that keras::fit expects; without it,
# subsetting would silently drop the singleton channel dimension.
X <- ImgData$Train$Train_Original[1:20,,,,drop=FALSE]
str(X)
Y <- ImgData$Train$Train_GroundTruth_Binary[1:20,,,,drop=FALSE]
str(Y)

# Train for 2 epochs with mini-batches of 2 images; verbose = 1 prints a
# progress bar. `history` records the per-epoch loss and metric values.
history <- model %>%
  keras::fit(x = X, 
             y = Y,
             batch_size = 2,
             epochs = 2,
             verbose = 1)
  • Parameters:
    • batch_size: Number of samples per gradient update / 1度に計算するサンプル数
    • epochs: Number of epochs to train the model / エポック数:一つの訓練データを何回繰り返して学習させるか
    • verbose: Verbosity mode (0 = silent, 1 = progress bar, 2 = one line per epoch) / 表示モード
  • Training Speed (3rd Nov 2021):
    • Orchestra: 15.7 s/step
    • MacOSX (2.3 GHz quad-core Intel Core i7, KUME): 2.15 s/step (7.3-fold faster)
    • GPU (Quadro RTX 8000): 51 ms/step (307.8-fold faster)
keras::fit process: 50 epochs (model_v01.h5)

Figure 1: keras::fit process: 50 epochs (model_v01.h5)

keras::fit process: 2000 epochs with GPU (model_v02.h5)

Figure 2: keras::fit process: 2000 epochs with GPU (model_v02.h5)

4.3 Save the model by save_model_hdf5()

model %>% 
  keras::save_model_hdf5("model_v01.h5")

#Model weights as R arrays
keras::get_weights(model)[[1]]

The save_model_hdf5 function can save all information of the model; the weight values, the model’s configuration(architecture), and the optimizer configuration.

4.4 Re-load, re-compile and re-fit

We will load the saved model and run compile and fit.

We can see that the calculation is done from the continuation.

4.4.1 model_v01.h5 (training: 60 epochs)

# Locate the pre-trained 60-epoch model shipped inside the rMiW package.
file <- system.file("extdata", "model_v01.h5", package="rMiW")

# Re-load the saved model. compile = FALSE skips restoring the saved
# optimizer/loss so we can re-compile with our own settings below.
# (Use FALSE, not the shorthand F: F is an ordinary variable and can be
# reassigned, so it is unsafe in scripts.)
modelR <- keras::load_model_hdf5(file, compile=FALSE)
summary(modelR)
keras::get_weights(modelR)[[1]]

# Re-compile with the same optimizer, loss, and metric used for training.
modelR <- modelR %>%
     keras::compile(
       optimizer = keras::optimizer_rmsprop(learning_rate = 0.01),
       loss = rMiW::bce_dice_loss,
       metrics = rMiW::dice_coef
     )

# Re-fit is intentionally disabled in this section; change FALSE to TRUE
# to continue training from the loaded weights.
if(FALSE){
history <- modelR %>%
  keras::fit(x = X, 
             y = Y,
             batch_size = 2,
             epochs = 1,
             verbose = 1)
}

4.4.2 model_v02.h5 (training: 2000 epochs)

# Locate the longer-trained (2000-epoch) model shipped inside rMiW.
file <- system.file("extdata", "model_v02.h5", package="rMiW")

# Re-load without compiling, then re-compile with the training-time
# loss/metric. (FALSE, not the reassignable shorthand F.)
modelR2 <- keras::load_model_hdf5(file, compile=FALSE)
summary(modelR2)
keras::get_weights(modelR2)[[1]]

# Re-compile with the same optimizer, loss, and metric used for training.
modelR2 <- modelR2 %>%
     keras::compile(
       optimizer = keras::optimizer_rmsprop(learning_rate = 0.01),
       loss = rMiW::bce_dice_loss,
       metrics = rMiW::dice_coef
     )

5 Model evaluation of model_v01.h5

Here we evaluate the model object using keras::evaluate function.

## Model evaluation on the training images (X, Y) from section 4.2.
## Score[[1]] is the compiled loss (rMiW::bce_dice_loss) and Score[[2]] is
## the compiled metric (rMiW::dice_coef). The printout below labels the
## metric "accuracy", but it is actually the Dice coefficient.
Score <- modelR %>% 
  keras::evaluate(X,
                  Y, 
                  verbose = 1)

cat(paste0('Train loss:', round(Score[[1]], 4), 
           '\nTrain accuracy:', round(Score[[2]], 4)))

# Reference values for model_v01 (training: 60 epochs):
#Train loss:1.3279
#Train accuracy:0.8672

5.1 Model prediction at image pixel level

The model is used to predict the binarization at the pixel level.

Y_pred <- rMiW::model.pred(model=modelR, 
                           x=X)

5.2 Visualization of training results

We use ImageView2D function for the visualization.

for(n in 1:20){
#n <- 2
rMiW::ImageView2D(X,
            Y_pred,
            ImgN=n)
}

We can visualize the results using another function.

#Image: 2
ImageView2D_pred(ImgArray_x=X,
                 ImgArray_y=Y,
                 ImgArray_pred=Y_pred,
                 ImgN=2)

#Image: 6
ImageView2D_pred(ImgArray_x=X,
                 ImgArray_y=Y,
                 ImgArray_pred=Y_pred,
                 ImgN=6)

#Image: All
for(N in 1:20){
ImageView2D_pred(ImgArray_x=X,
                 ImgArray_y=Y,
                 ImgArray_pred=Y_pred,
                 ImgN=N)
}

5.3 Prediction for test dataset (20 images)

# Build the held-out set: images 21-40 and their masks, keeping the 4D
# shape with drop=FALSE.
# NOTE(review): these are sliced from Train_Original, not Test_Original —
# confirm this is the intended "test" split.
Test_X <- ImgData$Train$Train_Original[21:40,,,,drop=FALSE]
str(Test_X)
Test_Y <- ImgData$Train$Train_GroundTruth_Binary[21:40,,,,drop=FALSE]
str(Test_Y)

## Model evaluation on the held-out images.
Score <- modelR %>% 
  keras::evaluate(Test_X, 
                  Test_Y, 
                  verbose = 1)

# Fix: this evaluates the held-out split, so the printed labels now say
# "Test" (the original mistakenly reused the "Train" labels).
cat(paste0('Test loss:', round(Score[[1]], 4), 
           '\nTest accuracy:', round(Score[[2]], 4)))

# Reference values for model_v01 (training: 60 epochs):
#Test loss:1.1545
#Test accuracy:0.8758

5.4 Visualization of test results

We use ImageView2D function for the visualization.

Test_Y_pred <- rMiW::model.pred(model=modelR, 
                                x=Test_X)

#visualization
for(N in 1:20){
ImageView2D_pred(ImgArray_x=Test_X,
                 ImgArray_y=Test_Y,
                 ImgArray_pred=Test_Y_pred,
                 ImgN=N)
}

6 Model evaluation of model_v02.h5

Here we evaluate the model object using keras::evaluate function.

## Model evaluation of the 2000-epoch model on the training images (X, Y).
## Score[[1]] is the compiled loss (rMiW::bce_dice_loss) and Score[[2]] the
## compiled metric (rMiW::dice_coef) — printed as "accuracy" below, though
## it is actually the Dice coefficient.
Score <- modelR2 %>% 
  keras::evaluate(X,
                  Y, 
                  verbose = 1)

cat(paste0('Train loss:', round(Score[[1]], 4), 
           '\nTrain accuracy:', round(Score[[2]], 4)))

# Reference values for model_v02 (training: 2000 epochs):
#Train loss:0.0051
#Train accuracy:0.9978

6.1 Model prediction at image pixel level

The model is used to predict the binarization at the pixel level.

Y_pred <- rMiW::model.pred(model=modelR2, 
                           x=X)

6.2 Visualization of results

We use ImageView2D function for the visualization.

for(n in 1:20){
#n <- 2
rMiW::ImageView2D(X,
            Y_pred,
            ImgN=n)
}

We can visualize the results using another function.

#Image: 2
ImageView2D_pred(ImgArray_x=X,
                 ImgArray_y=Y,
                 ImgArray_pred=Y_pred,
                 ImgN=2)

#Image: 6
ImageView2D_pred(ImgArray_x=X,
                 ImgArray_y=Y,
                 ImgArray_pred=Y_pred,
                 ImgN=6)

#Image: All
for(N in 1:20){
ImageView2D_pred(ImgArray_x=X,
                 ImgArray_y=Y,
                 ImgArray_pred=Y_pred,
                 ImgN=N)
}

6.3 Prediction for test dataset (20 images)

# Held-out set: images 21-40 from the training arrays (same split as
# section 5.3; drop=FALSE keeps the 4D shape).
# NOTE(review): sliced from Train_Original, not Test_Original — confirm.
Test_X <- ImgData$Train$Train_Original[21:40,,,,drop=FALSE]
str(Test_X)
Test_Y <- ImgData$Train$Train_GroundTruth_Binary[21:40,,,,drop=FALSE]
str(Test_Y)

## Evaluate the 2000-epoch model on the held-out images.
Score <- modelR2 %>% 
  keras::evaluate(Test_X, 
                  Test_Y, 
                  verbose = 1)

# Fix: these are test-set numbers, so label them "Test" (the original
# printout reused the "Train" labels).
cat(paste0('Test loss:', round(Score[[1]], 4), 
           '\nTest accuracy:', round(Score[[2]], 4)))

# Fix: this section evaluates model_v02 (the original comment said model_v01).
#model_v02 (training: 2000 epochs)
#Test loss:0.8893
#Test accuracy:0.9292

6.4 Visualization of test results

We use ImageView2D function for the visualization.

Test_Y_pred <- rMiW::model.pred(model=modelR2, 
                                x=Test_X)

#visualization
for(N in 1:20){
ImageView2D_pred(ImgArray_x=Test_X,
                 ImgArray_y=Test_Y,
                 ImgArray_pred=Test_Y_pred,
                 ImgN=N)
}

Session information

## R version 4.1.1 (2021-08-10)
## Platform: x86_64-apple-darwin17.0 (64-bit)
## Running under: macOS Catalina 10.15.7
## 
## Matrix products: default
## BLAS:   /Library/Frameworks/R.framework/Versions/4.1/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/4.1/Resources/lib/libRlapack.dylib
## 
## locale:
## [1] ja_JP.UTF-8/ja_JP.UTF-8/ja_JP.UTF-8/C/ja_JP.UTF-8/ja_JP.UTF-8
## 
## attached base packages:
## [1] stats     graphics  grDevices utils     datasets  methods   base     
## 
## other attached packages:
##  [1] BioImageDbs_1.2.0   keras_2.6.1         EBImage_4.36.0     
##  [4] rMiW_0.99.4         ExperimentHub_2.2.0 AnnotationHub_3.2.0
##  [7] BiocFileCache_2.2.0 dbplyr_2.1.1        BiocGenerics_0.40.0
## [10] BiocStyle_2.22.0   
## 
## loaded via a namespace (and not attached):
##  [1] bitops_1.0-7                  bit64_4.0.5                  
##  [3] filelock_1.0.2                RColorBrewer_1.1-2           
##  [5] httr_1.4.2                    GenomeInfoDb_1.30.0          
##  [7] tools_4.1.1                   bslib_0.3.1                  
##  [9] utf8_1.2.2                    R6_2.5.1                     
## [11] DBI_1.1.1                     colorspace_2.0-2             
## [13] withr_2.4.2                   tidyselect_1.1.1             
## [15] einsum_0.1.0                  bit_4.0.4                    
## [17] curl_4.3.2                    compiler_4.1.1               
## [19] Biobase_2.54.0                animation_2.7                
## [21] bookdown_0.24                 sass_0.4.0                   
## [23] filesstrings_3.2.2            tfruns_1.5.0                 
## [25] rappdirs_0.3.3                stringr_1.4.0                
## [27] digest_0.6.28                 tiff_0.1-8                   
## [29] fftwtools_0.9-11              rmarkdown_2.11               
## [31] XVector_0.34.0                base64enc_0.1-3              
## [33] jpeg_0.1-9                    pkgconfig_2.0.3              
## [35] htmltools_0.5.2               highr_0.9                    
## [37] fastmap_1.1.0                 strex_1.4.2                  
## [39] htmlwidgets_1.5.4             rlang_0.4.12                 
## [41] RSQLite_2.2.8                 shiny_1.7.1                  
## [43] visNetwork_2.1.0              jquerylib_0.1.4              
## [45] generics_0.1.1                jsonlite_1.7.2               
## [47] tensorflow_2.6.0              mmand_1.6.1                  
## [49] dplyr_1.0.7                   RCurl_1.98-1.5               
## [51] magrittr_2.0.1                GenomeInfoDbData_1.2.7       
## [53] Matrix_1.3-4                  Rcpp_1.0.7                   
## [55] S4Vectors_0.32.0              fansi_0.5.0                  
## [57] abind_1.4-5                   reticulate_1.22              
## [59] lifecycle_1.0.1               stringi_1.7.5                
## [61] whisker_0.4                   yaml_2.2.1                   
## [63] zlibbioc_1.40.0               grid_4.1.1                   
## [65] blob_1.2.2                    promises_1.2.0.1             
## [67] crayon_1.4.2                  lattice_0.20-45              
## [69] Biostrings_2.62.0             KEGGREST_1.34.0              
## [71] locfit_1.5-9.4                magick_2.7.3                 
## [73] zeallot_0.1.0                 knitr_1.36                   
## [75] pillar_1.6.4                  igraph_1.2.7                 
## [77] markdown_1.1                  stats4_4.1.1                 
## [79] glue_1.4.2                    BiocVersion_3.14.0           
## [81] evaluate_0.14                 BiocManager_1.30.16          
## [83] png_0.1-7                     vctrs_0.3.8                  
## [85] httpuv_1.6.3                  purrr_0.3.4                  
## [87] assertthat_0.2.1              cachem_1.0.6                 
## [89] xfun_0.27                     mime_0.12                    
## [91] xtable_1.8-4                  later_1.3.0                  
## [93] tibble_3.1.5                  AnnotationDbi_1.56.1         
## [95] memoise_2.0.0                 IRanges_2.28.0               
## [97] DiagrammeR_1.0.6.1            ellipsis_0.3.2               
## [99] interactiveDisplayBase_1.32.0