PADME Train Wiki
PipelineTestTrain
libusage.py
Code fragments of libusage.py
import csv
import datetime
import os
import os.path as osp
import random
import shutil
import sys
from random import randrange
import numpy as np
import padme_conductor as pc
import pandas as pd
import pytz
import torch
import torch.nn as nn
from fhirpy import SyncFHIRClient
from keras.layers import (
    Concatenate,
    Dense,
    Embedding,
    Flatten,
    Input,
    LeakyReLU,
    Multiply,
)
from keras.models import Model, Sequential
from keras.optimizers import adam_v2
from padme_conductor.Plugins.FHIR import FHIRClient
from padme_conductor.Query import Query
from padme_conductor.Separation import Separation
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler, StandardScaler
def conditions_to_df(conditions):
    patients_condition = []
    for condition in conditions:
        try:
            label = condition["code"]["coding"][0]["code"]
            patient_id_str = condition["subject"]["reference"]
            if patient_id_str[:7] == "Patient":
                patient_id = patient_id_str[8:]
                patients_condition.append([patient_id, label])
        except KeyError:
            pc.log("Key error encountered, skipping Condition...")
    condition_df = pd.DataFrame(patients_condition, columns=["patient_id", "label"])
    return condition_df
def patients_to_df(patients):
    patients_data = []
    for patient in patients:
        patient_birthDate = None
        try:
            patient_birthDate = patient.birthDate
        except Exception:
            # birthDate may be absent on a Patient resource
            pass
        patients_data.append([patient.id, patient.gender, patient_birthDate])
    patient_df = pd.DataFrame(
        patients_data, columns=["patient_id", "gender", "birthDate"]
    )
    return patient_df
def observations_to_df(observations):
    patients_observation = {}
    for observation in observations:
        try:
            feature = observation["category"][0]["coding"][0]["code"]
            if feature in X_FEATURES:
                value = observation["valueQuantity"]["value"]
                patient_id_str = observation["subject"]["reference"]
                if patient_id_str[:7] == "Patient":
                    patient_id = patient_id_str[8:]
                    if patient_id not in patients_observation:
                        patients_observation[patient_id] = {}
                    patients_observation[patient_id][feature] = float(value)
        except KeyError:
            pc.log("Key error encountered, skipping Observation...")
    for k in patients_observation.keys():
        patients_observation[k].update(patient_id=k)
    observation_df = pd.DataFrame(list(patients_observation.values()))
    # no-op: the result of set_index is not assigned, so patient_id remains a
    # regular column (which the merges in analysis() rely on)
    observation_df.set_index(["patient_id"])
    return observation_df
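# Illustrative example of the FHIR resource shapes the converters above expect
# (field names follow FHIR R4; the sample values are made up):
#
#   condition = {
#       "code": {"coding": [{"code": "non-cancer"}]},
#       "subject": {"reference": "Patient/123"},
#   }
#   conditions_to_df([condition])
#   #    patient_id       label
#   # 0         123  non-cancer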
def analysis(patients, observations, conditions):
    # """
    # @Laurenz, please replace here with your code for data extraction (FHIR -> DataFrame)
    # """
    ###############################################################################################
    # data = pd.read_csv('../input/data.csv')
    # data = data.drop(['Unnamed: 32', 'id'], axis=1)
    ###############################################################################################
    patients_df = patients_to_df(patients)
    observation_df = observations_to_df(observations)
    condition_df = conditions_to_df(conditions)
    data = pd.merge(
        pd.read_csv("./input/{}.csv".format(station_name)),
        pd.merge(
            pd.merge(patients_df, observation_df, on="patient_id", how="outer"),
            condition_df,
            on="patient_id",
            how="outer",
        ),
        on="patient_id",
        how="inner",
    )
    data["label"] = data["label"].map(lambda x: "B" if x == "non-cancer" else "M")
    pc.log(data.head(5))
    ###############################################################################################
    # splitting the station data into train and test
    # the split is 60% training data and 40% test data, performed per label
    # create a data frame dictionary to store the per-label frames
    DataFrameDict = {elem: pd.DataFrame for elem in ["M", "B"]}
    for key in DataFrameDict.keys():
        DataFrameDict[key] = data[:][data.label == key]
    data_label_B = DataFrameDict["B"]
    data_label_M = DataFrameDict["M"]
    data_train_B, data_test_B = train_test_split(data_label_B, test_size=0.4)
    data_train_M, data_test_M = train_test_split(data_label_M, test_size=0.4)
    data_train = data_train_B.append(data_train_M)
    data_test = data_test_B.append(data_test_M)
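    # Note (illustrative, not part of the train): splitting each label group
    # separately and recombining is equivalent to a stratified split, e.g.
    #   data_train, data_test = train_test_split(
    #       data, test_size=0.4, stratify=data["label"]
    #   )
    # Also, DataFrame.append was removed in pandas 2.x; pd.concat([...]) is the
    # replacement if the base image is ever upgraded.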
    ##############################################################################################
    # Synthetic data generation
    # for the generation we use a cGAN algorithm with an intuitive structure:
    # the cGAN consists of a Discriminator and a Generator
    # the Generator produces synthetic data modelled on the real-world data
    # the Discriminator tries to separate the real-world data from the synthetic data
    # the output of each module becomes the input of the other;
    # here we use a fixed iteration count to break that cycle
    # the number of iterations, the batch size, the logging interval and the
    # number of generated rows are configurable:
    # - iterations is the number of training cycles
    # - batch_size is the number of samples passed between Discriminator and Generator per cycle
    # - interval controls how often training progress is logged
    # - number_of_row sets how many synthetic rows are generated
    # formatting the training data for the cGAN:
    # labels need to be 0 and 1 for a smoother transition
    if generation_synthetic_data:
        labels_training = data_train[Y_FEATURE]
        labels_training = labels_training.map(dict(M=1, B=0))
        features_training = data_train[X_FEATURES]
        # merge the data into one dataframe again for training the cGAN
        train_data_synthetic = pd.concat(
            [labels_training, features_training], axis=1, join="inner"
        )
        # main entry point for training the cGAN, as explained above
        synthetic_data = train_synthetic(
            data=train_data_synthetic,
            iterations=5000,
            batch_size=128,
            interval=1000,
            number_of_row=data.shape[1],
        )
        # random generation of the patient id, gender and birthday;
        # the label is mapped back to its original values
        label_synthetic = []
        patient_id_synthetic = []
        patient_gender_synthetic = []
        patient_birthday_synthetic = []
        # very simple approach to generating the missing fields; note: can be improved
        for row in range(0, len(synthetic_data)):
            synthetic_data_row = synthetic_data[row, :]
            if synthetic_data_row[0] == 1:
                label_synthetic.append("M")
            else:
                label_synthetic.append("B")
            patient_id_synthetic.append("bbmri" + str(row))
            p_g = "female"
            p_b = "01.01.2000"
            patient_birthday_synthetic.append(p_b)
            patient_gender_synthetic.append(p_g)
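        # A less constant alternative (sketch, not used by the train): draw the
        # gender and birth year at random, e.g. with the randrange already
        # imported at the top of the file:
        #   p_g = random.choice(["female", "male"])
        #   p_b = "01.01.{}".format(randrange(1940, 2005))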
        # keep only the X_FEATURES columns of the synthetic data
        synthetic_data = synthetic_data[:, 1:31]
        # write everything into a dataframe for representation; the column list
        # is the 30 X_FEATURES framed by the id/demographic columns and the label
        synthetic_df = pd.DataFrame(
            np.c_[
                patient_id_synthetic,
                patient_gender_synthetic,
                patient_birthday_synthetic,
                synthetic_data,
                label_synthetic,
            ],
            columns=["patient_id", "gender", "birthDate"] + X_FEATURES + ["label"],
        )
        """ @Toralf, please add the code for enlarging the dataset size. """
    ###############################################################################################
    ## Exploratory data analysis (EDA) and data normalization
    ## in this example the training data is the station data plus the synthetic
    ## data generated by the cGAN; the test data is real data only
    if generation_synthetic_data:
        X_train = data_train[X_FEATURES].append(
            synthetic_df[X_FEATURES].astype(str).astype(float)
        )
        y_train = data_train[Y_FEATURE].append(synthetic_df[Y_FEATURE])
    else:
        X_train = data_train[X_FEATURES]
        y_train = data_train[Y_FEATURE]
    X_test = data_test[X_FEATURES]
    y_test = data_test[Y_FEATURE]
    ## saving statistical data for visualization of the train data
    X_train_mean = X_train.mean(axis=0)
    X_train_std = X_train.std(axis=0)
    pc.log(f"Mean: {X_train_mean}")
    pc.log(f"Std: {X_train_std}")
    B = (y_train == "B").sum()
    M = (y_train == "M").sum()
    pc.log(f"Number of Benign: {B}")
    pc.log(f"Number of Malignant: {M}")
    stat_dir = pc.get_save_path() / "stat"
    df_X_train_mean = pd.DataFrame([X_train_mean], columns=X_FEATURES)
    df_X_train_std = pd.DataFrame([X_train_std])
    df_y_train_dist = pd.DataFrame([[B, M]], columns=["B", "M"])
    pc.save(
        df_X_train_mean.to_csv(),
        "X_train_mean.csv",
        stat_dir,
        separate_by=Separation.STATION,
    )
    pc.save(
        df_X_train_std.to_csv(),
        "X_train_std.csv",
        stat_dir,
        separate_by=Separation.STATION,
    )
    pc.save(
        df_y_train_dist.to_csv(),
        "Y_train_Dist.csv",
        stat_dir,
        separate_by=Separation.STATION,
    )
    ## the same statistics for the test data
    X_test_mean = X_test.mean(axis=0)
    X_test_std = X_test.std(axis=0)
    pc.log(f"Mean: {X_test_mean}")
    pc.log(f"Std: {X_test_std}")
    B = (y_test == "B").sum()
    M = (y_test == "M").sum()
    pc.log(f"Number of Benign: {B}")
    pc.log(f"Number of Malignant: {M}")
    df_X_test_mean = pd.DataFrame([X_test_mean], columns=X_FEATURES)
    df_X_test_std = pd.DataFrame([X_test_std])
    df_y_test_dist = pd.DataFrame([[B, M]], columns=["B", "M"])
    pc.save(
        df_X_test_mean.to_csv(),
        "X_test_mean.csv",
        stat_dir,
        separate_by=Separation.STATION,
    )
    pc.save(
        df_X_test_std.to_csv(),
        "X_test_std.csv",
        stat_dir,
        separate_by=Separation.STATION,
    )
    pc.save(
        df_y_test_dist.to_csv(),
        "Y_test_Dist.csv",
        stat_dir,
        separate_by=Separation.STATION,
    )
    # changed the name of the file
    # normalization of the data
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    y_train.replace(to_replace=dict(M=1, B=0), inplace=True)
    y_train = y_train.to_numpy()
    # apply the scaler fitted on the training data; refitting on the test set
    # would let test statistics leak into the preprocessing
    X_test = scaler.transform(X_test)
    y_test.replace(to_replace=dict(M=1, B=0), inplace=True)
    y_test = y_test.to_numpy()
    ## logistic regression model
    model = LogisticRegression(input_dim, hidden_dim, num_classes)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(
        model.parameters(), lr=learning_rate, weight_decay=weight_decay
    )
    best_acc = 0.0
    model_dir = pc.get_save_path() / "models"
    if pc.is_first_execution():
        model_dir.mkdir(parents=True, exist_ok=True)
        torch.save(
            {
                "epoch": -1,
                "optim_state_dict": optimizer.state_dict(),
                "model_state_dict": model.state_dict(),
                "best_acc": 0.0,
            },
            model_dir / "dnn.pth.tar",
        )
    checkpoint = torch.load(model_dir / "dnn.pth.tar")
    model.load_state_dict(checkpoint["model_state_dict"])
    optimizer.load_state_dict(checkpoint["optim_state_dict"])
    best_acc = checkpoint["best_acc"]
    training_dir = pc.get_save_path() / "training"
    filename_output_log = station_name + "logs.csv"
    if generation_synthetic_data:
        filename_output_log = station_name + "synthetic_logs.csv"
    if pc.is_first_execution(separate_by=Separation.STATION):
        header = (
            ",".join(
                [
                    "epoch",
                    "station_name",
                    "train_loss",
                    "val_loss",
                    "val_acc",
                    "val_prec",
                    "val_rec",
                    "val_f1",
                    "time",
                ]
            )
            + "\n"
        )
        pc.save(header, filename_output_log, training_dir)
    for epoch in range(num_epochs):
        perm = np.arange(X_train.shape[0])
        np.random.shuffle(perm)
        X_train = X_train[perm]
        y_train = y_train[perm]
        loss = train(X_train, y_train, model, criterion, optimizer)
        val_loss, val_acc, val_precision, val_recall, val_f1_score, val_ppv = valid(
            X_test, y_test, model, criterion
        )
        log = (
            ",".join(
                map(
                    str,
                    [
                        epoch,
                        station_name,
                        loss,
                        val_loss,
                        val_acc,
                        val_precision,
                        val_recall,
                        val_f1_score,
                        str(datetime.datetime.now(pytz.timezone("Europe/Berlin"))),
                    ],
                )
            )
            + "\n"
        )
        pc.save(log, filename_output_log, training_dir, append=True)
        torch.save(
            {
                "epoch": epoch,
                "optim_state_dict": optimizer.state_dict(),
                "model_state_dict": model.state_dict(),
                "best_acc": val_acc,
            },
            model_dir / "checkpoint.pth.tar",
        )
        if val_acc > best_acc:
            # keep the running best so later, worse epochs do not overwrite a
            # better model
            best_acc = val_acc
            shutil.copy(
                model_dir / "checkpoint.pth.tar",
                model_dir / "dnn.pth.tar",
            )
class LogisticRegression(nn.Module):
    # despite the name, this is a small MLP: one sigmoid hidden layer followed
    # by a linear output layer that returns raw logits
    def __init__(self, input_dim, hidden_dim, num_classes):
        super(LogisticRegression, self).__init__()
        self.hidden_layer = nn.Linear(input_dim, hidden_dim)
        self.sigmoid = nn.Sigmoid()
        self.output_layer = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        out = self.output_layer(self.sigmoid(self.hidden_layer(x)))
        return out
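# Shape check (illustrative): with input_dim=30, hidden_dim=64 and
# num_classes=2, a batch of 8 samples flows through as
#   x: (8, 30) -> sigmoid(hidden): (8, 64) -> logits: (8, 2)
# nn.CrossEntropyLoss applies log-softmax internally, which is why forward()
# returns raw logits.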
def train(X_train, y_train, model, criterion, optimizer):
    # performs one full-batch gradient step per call
    inputs = torch.from_numpy(X_train).float()
    targets = torch.from_numpy(y_train).long()
    optimizer.zero_grad()
    outputs = model(inputs)
    loss = criterion(outputs, targets)
    loss.backward()
    optimizer.step()
    return loss.item()
def valid(X_test, y_test, model, criterion):
    inputs = torch.from_numpy(X_test).float()
    targets = torch.from_numpy(y_test).long()
    outputs = model(inputs)
    val_loss = criterion(outputs, targets)
    _, predicted = torch.max(outputs, 1)
    cm = confusion_matrix(targets.numpy(), predicted.numpy())
    tn, fp, fn, tp = cm[0][0], cm[0][1], cm[1][0], cm[1][1]
    with np.errstate(divide="ignore", invalid="ignore"):
        val_acc = (tp + tn) / (tp + fp + fn + tn)
        val_ppv = tp / (tp + fp)
        val_precision = tp / (tp + fp)
        val_recall = tp / (tp + fn)
        val_f1_score = 2 * tp / (2 * tp + fn + fp)
    return val_loss.item(), val_acc, val_precision, val_recall, val_f1_score, val_ppv
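# Worked example (illustrative numbers): for cm = [[50, 5], [10, 35]],
# i.e. tn=50, fp=5, fn=10, tp=35:
#   accuracy  = (35 + 50) / 100        = 0.85
#   precision = 35 / (35 + 5)          = 0.875
#   recall    = 35 / (35 + 10)         ≈ 0.778
#   f1        = 2 * 35 / (70 + 10 + 5) ≈ 0.824
# Caveat: if targets and predictions contain only one class, confusion_matrix
# returns a 1x1 matrix and the unpacking above fails; np.errstate only
# silences division warnings, it does not guard against that.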
def shuffle_list(lst):
    lst2 = lst.copy()
    random.shuffle(lst2)
    return lst2
def train_synthetic(data, iterations, batch_size, interval, number_of_row):
    Xtrainnew = data
    mydata = Xtrainnew.values.tolist()
    ytrain = []
    for j in mydata:
        ytrain.append(j[0])
    Xtrainnew = pd.DataFrame(data=mydata)
    Ytrainnew = np.array(ytrain)
    scaler = MinMaxScaler()
    scaled = scaler.fit_transform(Xtrainnew)
    Xtrain = scaled
    ytrain = Ytrainnew
    real = np.ones((batch_size, 1))
    fake = np.zeros((batch_size, 1))
    for iteration in range(iterations):
        # discriminator step: one batch of real rows, one batch of generated rows
        ids = np.random.randint(0, Xtrain.shape[0], batch_size)
        imgs = Xtrain[ids]
        labels = ytrain[ids]
        z = np.random.normal(0, 1, (batch_size, 100))
        gen_imgs = gen_v.predict([z, labels])
        dloss_real = dis_v.train_on_batch([imgs, labels], real)
        dloss_fake = dis_v.train_on_batch([gen_imgs, labels], fake)
        dloss, accuracy = 0.5 * np.add(dloss_real, dloss_fake)
        # generator step: fresh noise and labels, trained to be classified as real
        z = np.random.normal(0, 1, (batch_size, 100))
        labels = np.random.randint(0, num_classes, batch_size).reshape(-1, 1)
        gloss = gan_v.train_on_batch([z, labels], real)
        if (iteration + 1) % interval == 0:
            losses.append((dloss, gloss))
            accuracies.append(100.0 * accuracy)
            iteration_checks.append(iteration + 1)
            pc.log(
                "%d [D loss: %f , acc: %.2f] [G loss: %f]"
                % (iteration + 1, dloss, 100.0 * accuracy, gloss)
            )
    return show_data(gen_v, scaler, number_of_row)
def savelist2csv(mynamefile, mylist):
    with open("./" + mynamefile, "w") as myfile:
        wr = csv.writer(myfile, delimiter="\n", quoting=csv.QUOTE_MINIMAL)
        wr.writerow(mylist)
def show_data(gen, scaler, number_of_rows):
    z = np.random.normal(0, 1, (number_of_rows, 100))
    labels = np.random.randint(2, size=number_of_rows)
    gen_imgs = gen.predict([z, labels])
    gen_imgs = scaler.inverse_transform(gen_imgs)
    for index in range(0, number_of_rows):
        gen_imgs[index] = np.around(gen_imgs[index], 4)
        # the first column is the label slot, so round it to 0 or 1
        gen_imgs[index][0] = np.around(gen_imgs[index][0], 0)
    return gen_imgs
def build_gen(zdim):
    model = Sequential()
    model.add(Dense(31, input_dim=zdim))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Dense(1 * 31, activation="tanh"))
    return model
def build_cgen(zdim):
    z = Input(shape=(zdim,))
    label = Input(shape=(1,), dtype="int32")
    label_emb = Embedding(num_classes, zdim, input_length=1)(label)
    label_emb = Flatten()(label_emb)
    joined_rep = Multiply()([z, label_emb])
    gen_v = build_gen(zdim)
    c_img = gen_v(joined_rep)
    return Model([z, label], c_img)
def build_dis(img_shape):
    model = Sequential()
    model.add(Flatten(input_shape=img_shape))
    model.add(Dense(31))
    model.add(LeakyReLU(alpha=0.01))
    model.add(Dense(1, activation="sigmoid"))
    return model
def build_cdis(img_shape):
    img = Input(shape=(img_cols,))
    label = Input(shape=(1,), dtype="int32")
    label_emb = Embedding(num_classes, np.prod((31)), input_length=1)(label)
    label_emb = Flatten()(label_emb)
    # label_emb = Reshape(img_shape)(label_emb)
    concate_img = Concatenate(axis=-1)([img, label_emb])
    dis_v = build_dis((img_rows, img_cols * 2))
    classification = dis_v(concate_img)
    return Model([img, label], classification)
def build_cgan(generator, discriminator):
    z = Input(shape=(zdim,))
    label = Input(shape=(1,), dtype="int32")
    f_img = generator([z, label])
    classification = discriminator([f_img, label])
    model = Model([z, label], classification)
    return model
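# Data flow of the three models above (illustrative):
#   gen_v (build_cgen):  (z, label)   -> synthetic row of 31 values
#   dis_v (build_cdis):  (row, label) -> probability that the row is real
#   gan_v (build_cgan):  (z, label)   -> dis_v(gen_v(z, label), label)
# gan_v is the path the generator is trained through; see the compile calls
# further down, where dis_v is frozen before gan_v is built.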
# for the data generation part there are two variables:
# 1. whether the data generation method should be used at all
# 2. how to handle outliers such as negative values, which can appear:
#    the generated data can contain negative values; biologically they are not
#    explainable, but from an informatics point of view they are reasonable,
#    so the station owner can decide whether the method should remove these "false" values
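def remove_negative_rows(df, feature_cols):
    # Hypothetical helper for the autoremove option described above (the flag
    # is not wired up yet, see the TODOs below): drop synthetic rows that
    # contain biologically implausible negative feature values. A sketch, not
    # the train's actual implementation.
    mask = (df[feature_cols].astype(float) >= 0).all(axis=1)
    return df[mask]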
# Get the env vars from the station software
env = pc.get_environment_vars(["FHIR_SERVER", "FHIR_PORT", "STATION_NAME"])
fhir_server = env["FHIR_SERVER"]
fhir_port = env["FHIR_PORT"]
station_name = env["STATION_NAME"].lower()
# = "UKL"
# TODO: check if the environment variables can hold the items for synthetic data generation and autoremove
# generation_synthetic_data = env["GENERATION_SYNTHETIC_DATA"]
# autoremove = env["AUTOREMOVE"]
# TODO: comment out when deploying
# station_name = "UKL"
generation_synthetic_data = False
# autoremove = False
# fhir_server, fhir_port = "10.50.9.90", "80"
assert station_name in ["uka", "ukg", "ukk", "ukl", "imise", "mittweida"]
X_FEATURES = [
    "radius_mean",
    "texture_mean",
    "perimeter_mean",
    "area_mean",
    "smoothness_mean",
    "compactness_mean",
    "concavity_mean",
    "concave.points_mean",
    "symmetry_mean",
    "fractal_dimension_mean",
    "radius_se",
    "texture_se",
    "perimeter_se",
    "area_se",
    "smoothness_se",
    "compactness_se",
    "concavity_se",
    "concave.points_se",
    "symmetry_se",
    "fractal_dimension_se",
    "radius_worst",
    "texture_worst",
    "perimeter_worst",
    "area_worst",
    "smoothness_worst",
    "compactness_worst",
    "concavity_worst",
    "concave.points_worst",
    "symmetry_worst",
    "fractal_dimension_worst",
]
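# (the 30 feature names and the B = benign / M = malignant labels used
# throughout appear to match the Breast Cancer Wisconsin (Diagnostic) dataset)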
Y_FEATURE = "label"
# Configurations
seed = 7
num_epochs = 10000
input_dim = 30
hidden_dim = 64
num_classes = 2
learning_rate = 0.01
weight_decay = 0.0005
# values for building the Discriminator and Generator of the cGAN;
# if they need to change, adjust them here rather than in every definition
img_rows = 1
img_cols = 31
img_shape = (img_rows, img_cols)
zdim = 100
dis_v = build_cdis(img_shape)
dis_v.compile(
    loss="binary_crossentropy", optimizer=adam_v2.Adam(), metrics=["accuracy"]
)
gen_v = build_cgen(zdim)
# freeze the discriminator inside the combined model so that training gan_v
# only updates the generator
dis_v.trainable = False
gan_v = build_cgan(gen_v, dis_v)
gan_v.compile(loss="binary_crossentropy", optimizer=adam_v2.Adam())
losses = []
accuracies = []
iteration_checks = []
fhir_plugin = FHIRClient(f"http://{fhir_server}:{fhir_port}/fhir")
patients = pc.query(Query(lambda client: client.resources("Patient"), fhir_plugin))
observations = pc.query(
    Query(
        lambda client: client.resources("Observation").include("Patient", "subject"),
        fhir_plugin,
    )
)
conditions = pc.query(Query(lambda client: client.resources("Condition"), fhir_plugin))
pc.execute_analysis(analysis, patients, observations, conditions)
Graph
[Interactive execution graph of the train; it does not render in this export. Node labels include queryDatabase, executeAnalysis, logging, stationParameters, saveResult and retrieveResult.]