Commit 0c1dd474 authored by enrgarc

Viewer changes

parent 6ce7af97
@@ -36,12 +36,12 @@ async def saveDataMonth(request: Request):
os.remove(f"{os.getcwd()}/SESSIONS/" + id + "-allData.json")
if(POSTGRESQL == True):
bd.execSQL(
"create table if not exists data (idMonth SERIAL primary key, name varchar(255) default null, firstDate date default null, data JSON DEFAULT NULL)")
"create table if not exists DATA (idMonth SERIAL primary key, name varchar(255) default null, firstDate date default null, data JSON DEFAULT NULL)")
else:
bd.execSQL("create table if not exists data (idMonth SERIAL primary key, name varchar(255) default null, firstDate date default null, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
bd.execSQL("delete from data where name='"+name+"'")
bd.execSQL("create table if not exists DATA (idMonth SERIAL primary key, name varchar(255) default null, firstDate date default null, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
bd.execSQL("delete from DATA where name='"+name+"'")
bd.execSQL(
"insert into data (name, firstDate, data) values('"+name+"','"+str(firstDate)+"','"+jsonData+"')")
"insert into DATA (name, firstDate, data) values('"+name+"','"+str(firstDate)+"','"+jsonData+"')")
return {
"status": True,
}
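Every statement in these routes builds SQL by concatenating request values, so a name containing a single quote breaks the query and is a textbook SQL injection vector. Below is a minimal sketch of the same save path with driver-side parameters, assuming a plain psycopg2 connection (whether the project's bd.execSQL helper accepts placeholder arguments is not visible in this diff):

import json
import psycopg2

def save_month(conn, name, first_date, month_data):
    # Hypothetical helper: %s placeholders let the driver escape values,
    # so quotes in `name` cannot terminate the statement early.
    json_data = json.dumps(month_data, ensure_ascii=False)
    with conn.cursor() as cur:
        cur.execute("delete from DATA where name = %s", (name,))
        cur.execute(
            "insert into DATA (name, firstDate, data) values (%s, %s, %s)",
            (name, first_date, json_data),
        )
    conn.commit()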
@@ -56,11 +56,11 @@ async def getData(request: Request):
id = data['UUID']
if(POSTGRESQL == True):
bd.execSQL(
"create table if not exists data (idMonth SERIAL primary key, name varchar(255) default null, firstDate date default null, data JSON DEFAULT NULL)")
"create table if not exists DATA (idMonth SERIAL primary key, name varchar(255) default null, firstDate date default null, data JSON DEFAULT NULL)")
else:
bd.execSQL("create table if not exists data (idMonth SERIAL primary key, name varchar(255) default null, firstDate date default null, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
bd.execSQL("create table if not exists DATA (idMonth SERIAL primary key, name varchar(255) default null, firstDate date default null, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
result = bd.selectFromSQL(
'select name, firstdate as "firstDate", data from data order by "firstDate" asc')
'select name, firstdate as "firstDate", data from DATA order by "firstDate" asc')
for res in result:
for d in res:
if(d == 'data'):
@@ -82,8 +82,8 @@ async def getData(request: Request):
@router.post("/resetData")
def resetData():
bd.execSQL("delete from data")
bd.execSQL("delete from allData")
bd.execSQL("delete from DATA")
bd.execSQL("delete from ALL_DATA")
return {
"status": True
}
@@ -93,7 +93,7 @@ def resetData():
async def dropData(request: Request):
data = await request.json()
name = data['name']
bd.execSQL("delete from data where name='"+name+"'")
bd.execSQL("delete from DATA where name='"+name+"'")
return {
"status": True
}
@@ -102,12 +102,12 @@ async def dropData(request: Request):
@router.post("/setAllData")
def setAllData():
if(POSTGRESQL == True):
bd.execSQL("create table if not exists allData (idVariable SERIAL primary key, name varchar(255) default null, firstDate date default null, lastDate date default null, info JSON DEFAULT NULL, data JSON DEFAULT NULL)")
bd.execSQL("create table if not exists ALL_DATA (idVariable SERIAL primary key, name varchar(255) default null, firstDate date default null, lastDate date default null, info JSON DEFAULT NULL, data JSON DEFAULT NULL)")
else:
bd.execSQL("create table if not exists allData (idVariable SERIAL primary key, name varchar(255) default null, firstDate date default null, lastDate date default null, info JSON DEFAULT NULL, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
bd.execSQL("delete from allData")
bd.execSQL("create table if not exists ALL_DATA (idVariable SERIAL primary key, name varchar(255) default null, firstDate date default null, lastDate date default null, info JSON DEFAULT NULL, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
bd.execSQL("delete from ALL_DATA")
result = bd.selectFromSQL(
'select name, firstdate as "firstDate", data from data order by "firstDate"')
'select name, firstdate as "firstDate", data from DATA order by "firstDate"')
data = {}
for res in result:
try:
@@ -124,7 +124,7 @@ def setAllData():
del result[key]['lastDate']
del result[key]['info']
jsonData = json.dumps(result[key], ensure_ascii=False)
bd.execSQL("insert into allData (name, firstDate, lastDate, info, data) values('"+key+"','"+str(firstDate)+"','"+str(lastDate)+"','"+info+"','"+jsonData+"')")
bd.execSQL("insert into ALL_DATA (name, firstDate, lastDate, info, data) values('"+key+"','"+str(firstDate)+"','"+str(lastDate)+"','"+info+"','"+jsonData+"')")
return {
"status": True
@@ -137,7 +137,7 @@ async def getVarsData(request: Request):
vars = data['vars']
month = data['month']
result = bd.selectFromSQL(
"select data from data where name='"+str(month)+"'")[0]
"select data from DATA where name='"+str(month)+"'")[0]
try:
monthData = json.loads(result['data'])
except:
@@ -225,7 +225,7 @@ async def getAllData(request: Request):
allData = {}
result = bd.selectFromSQL(
'select idVariable, name, firstdate as "firstDate", lastdate as "lastDate", info, data from allData')
'select idVariable, name, firstdate as "firstDate", lastdate as "lastDate", info, data from ALL_DATA')
if(len(result) == 0):
setAllData()
for d in result:
@@ -264,7 +264,7 @@ async def getAllData(request: Request):
@router.post("/checkAllData")
def checkAllData():
result = bd.selectFromSQL("select count(*) as count from allData")[0]
result = bd.selectFromSQL("select count(*) as count from ALL_DATA")[0]
return {
"status": True,
"data": result
@@ -274,7 +274,7 @@ def checkAllData():
@router.post("/getSortestRangeForValues")
def getSortestRangeForValues():
result = bd.selectFromSQL(
'select max(firstDate) as "firstDate", min(lastDate) as "lastDate" from allData')[0]
'select max(firstDate) as "firstDate", min(lastDate) as "lastDate" from ALL_DATA')[0]
return {
"status": True,
"data": result
@@ -26,10 +26,10 @@ async def saveDataset(request: Request):
else:
jsonData = json.dumps(data["data"], ensure_ascii=False)
if(POSTGRESQL == True):
bd.execSQL("create table if not exists datasets (idDataset SERIAL primary key, name varchar(255) default null, description varchar(255) default null, integrationType varchar(255) default null, datesRange varchar(255) default null, data JSON DEFAULT NULL)")
bd.execSQL("create table if not exists DATASET (idDataset SERIAL primary key, name varchar(255) default null, description varchar(255) default null, integrationType varchar(255) default null, datesRange varchar(255) default null, data JSON DEFAULT NULL)")
else:
bd.execSQL("create table if not exists datasets (idDataset int primary key AUTO_INCREMENT, name varchar(255) default null, description varchar(255) default null, integrationType varchar(255) default null, datesRange varchar(255) default null, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
bd.execSQL("insert into datasets (name, description, integrationType, datesRange, data) values('" +
bd.execSQL("create table if not exists DATASET (idDataset int primary key AUTO_INCREMENT, name varchar(255) default null, description varchar(255) default null, integrationType varchar(255) default null, datesRange varchar(255) default null, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
bd.execSQL("insert into DATASET (name, description, integrationType, datesRange, data) values('" +
name+"','"+description+"','"+integrationType+"','"+datesRange+"','"+jsonData+"')")
return {
"status": True,
@@ -40,7 +40,7 @@ async def saveDataset(request: Request):
async def getDataset(request: Request):
data = await request.json()
id = data['id']
result = bd.selectFromSQL("select name, data from datasets where idDataset="+str(id))[0]
result = bd.selectFromSQL("select name, data from DATASET where idDataset="+str(id))[0]
return {
"status": True,
"data": result['data'],
@@ -52,7 +52,7 @@ async def getDataset(request: Request):
async def getDatasetInfo(request: Request):
data = await request.json()
id = data['id']
result = bd.selectFromSQL("select data from datasets where idDataset="+str(id))[0]['data']
result = bd.selectFromSQL("select data from DATASET where idDataset="+str(id))[0]['data']
try:
df = pd.DataFrame(json.loads(result))
except:
@@ -66,10 +66,10 @@ async def getDatasetInfo(request: Request):
@router.post("/getDatasets")
def getDatasets():
if(POSTGRESQL == True):
bd.execSQL("create table if not exists datasets (idDataset SERIAL primary key, name varchar(255) default null, description varchar(255) default null, integrationType varchar(255) default null, datesRange varchar(255) default null, data JSON DEFAULT NULL)")
bd.execSQL("create table if not exists DATASET (idDataset SERIAL primary key, name varchar(255) default null, description varchar(255) default null, integrationType varchar(255) default null, datesRange varchar(255) default null, data JSON DEFAULT NULL)")
else:
bd.execSQL("create table if not exists datasets (idDataset int primary key AUTO_INCREMENT, name varchar(255) default null, description varchar(255) default null, integrationType varchar(255) default null, datesRange varchar(255) default null, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
result = bd.selectFromSQL('select iddataset as "idDataset", name, description, integrationtype as "integrationType", datesrange as "datesRange" from datasets')
bd.execSQL("create table if not exists DATASET (idDataset int primary key AUTO_INCREMENT, name varchar(255) default null, description varchar(255) default null, integrationType varchar(255) default null, datesRange varchar(255) default null, data JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
result = bd.selectFromSQL('select iddataset as "idDataset", name, description, integrationtype as "integrationType", datesrange as "datesRange" from DATASET')
return {
"status": True,
"data": result
@@ -80,8 +80,8 @@ def getDatasets():
async def deleteDataset(request: Request):
data = await request.json()
id = data["id"]
bd.execSQL("delete from datasets where idDataset="+str(id))
bd.execSQL("delete from executions where dataset="+str(id))
bd.execSQL("delete from DATASET where idDataset="+str(id))
bd.execSQL("delete from EXECUTION where dataset="+str(id))
return {
"status": True,
}
@@ -89,8 +89,8 @@ async def deleteDataset(request: Request):
@router.post("/deleteAllDatasets")
def deleteAllDatasets():
bd.execSQL("delete from datasets")
bd.execSQL("delete from executions")
bd.execSQL("delete from DATASET")
bd.execSQL("delete from EXECUTION")
return {
"status": True,
}
import datetime
from fastapi.routing import APIRouter
from fastapi import BackgroundTasks
from starlette.requests import Request
@@ -18,11 +19,11 @@ async def testModel(request: Request):
taskType = data['taskType']
params = data['params']
result = bd.selectFromSQL(
'select iddataset as "idDataset", data from datasets where idDataset='+str(idDataset))[0]
'select iddataset as "idDataset", data from DATASET where idDataset='+str(idDataset))[0]
result = mlService.testModel(result, task, taskType, params)
return {
"status": True,
"data": result
"data": json.dumps(result, cls=npEncoder.NpEncoder, ensure_ascii=False, default=default)
}
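A caveat on the new serialization line: when json.dumps receives both cls= and default=, the default function is passed into the encoder's constructor and overrides the encoder class's own default method, so NpEncoder's numpy handling is effectively bypassed here. One way around it, sketched under the assumption that npEncoder.NpEncoder exists to convert numpy scalars (its body is not part of this diff), is to fold the date handling into the encoder and drop the default= argument:

import datetime
import json
import numpy as np

class NpEncoder(json.JSONEncoder):  # hypothetical stand-in for npEncoder.NpEncoder
    def default(self, o):
        if isinstance(o, (np.integer, np.floating)):
            return o.item()                       # numpy scalars -> Python numbers
        if isinstance(o, np.ndarray):
            return o.tolist()                     # arrays -> lists
        if isinstance(o, (datetime.date, datetime.datetime)):
            return o.isoformat()                  # dates -> ISO strings
        return super().default(o)                 # anything else: raise TypeError

payload = json.dumps({"rmse": np.float32(0.8), "run": datetime.date(2021, 5, 1)},
                     cls=NpEncoder, ensure_ascii=False)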
@@ -36,20 +37,20 @@ async def generateModel(request: Request, background_tasks: BackgroundTasks):
taskType = data['taskType']
params = data['params']
dataset = bd.selectFromSQL(
'select iddataset as "idDataset", data from datasets where idDataset='+str(idDataset))[0]
'select iddataset as "idDataset", data from DATASET where idDataset='+str(idDataset))[0]
if(POSTGRESQL == True):
bd.execSQL("create table if not exists executions (idExecution SERIAL primary key, name varchar(255) default null, description varchar(2000) default null, task varchar(255) default null, taskType varchar(255) default null, dataset int default null, executionDate timestamp DEFAULT null, clusterURL varchar(255) default null, figures JSON DEFAULT NULL, df JSON DEFAULT NULL)")
bd.execSQL("create table if not exists EXECUTION (idExecution SERIAL primary key, name varchar(255) default null, description varchar(2000) default null, task varchar(255) default null, taskType varchar(255) default null, dataset int default null, executionDate timestamp DEFAULT null, clusterURL varchar(255) default null, figures JSON DEFAULT NULL, df JSON DEFAULT NULL)")
else:
bd.execSQL("create table if not exists executions (idExecution int primary key AUTO_INCREMENT, name varchar(255) default null, description varchar(2000) default null, task varchar(255) default null, taskType varchar(255) default null, dataset int default null, executionDate datetime DEFAULT null, clusterURL varchar(255) default null, figures JSON DEFAULT NULL, df JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
bd.execSQL("create table if not exists EXECUTION (idExecution int primary key AUTO_INCREMENT, name varchar(255) default null, description varchar(2000) default null, task varchar(255) default null, taskType varchar(255) default null, dataset int default null, executionDate datetime DEFAULT null, clusterURL varchar(255) default null, figures JSON DEFAULT NULL, df JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
execNumber = bd.selectFromSQL(
"select count(*) as count from executions")[0]['count'] + 1
"select count(*) as count from EXECUTION")[0]['count'] + 1
result = mlService.generateModel(
dataset, task, taskType, params, execNumber)
if(result['figures'] != None):
bd.execSQL("insert into executions (name, description, task, taskType, dataset, executionDate, clusterURL, figures, df) values('"+name+"','"+description +
bd.execSQL("insert into EXECUTION (name, description, task, taskType, dataset, executionDate, clusterURL, figures, df) values('"+name+"','"+description +
"','"+task+"','"+taskType+"','"+str(dataset['idDataset'])+"', now(),'"+result['clusterURL']+"','"+result['figures']+"','"+result['dataFrame']+"')")
else:
bd.execSQL("insert into executions (name, description, task, taskType, dataset, executionDate, clusterURL, df) values('"+name+"','" +
bd.execSQL("insert into EXECUTION (name, description, task, taskType, dataset, executionDate, clusterURL, df) values('"+name+"','" +
description+"','"+task+"','"+taskType+"','"+str(dataset['idDataset'])+"', now(),'"+result['clusterURL']+"','"+result['dataFrame']+"')")
return {
"status": True
@@ -59,14 +60,14 @@ async def generateModel(request: Request, background_tasks: BackgroundTasks):
@router.post("/getExecutions")
def getExecutions():
if(POSTGRESQL == True):
bd.execSQL("create table if not exists executions (idExecution SERIAL primary key, name varchar(255) default null, description varchar(2000) default null, task varchar(255) default null, taskType varchar(255) default null, dataset int default null, executionDate timestamp DEFAULT null, clusterURL varchar(255) default null, figures JSON DEFAULT NULL, df JSON DEFAULT NULL)")
bd.execSQL("create table if not exists EXECUTION (idExecution SERIAL primary key, name varchar(255) default null, description varchar(2000) default null, task varchar(255) default null, taskType varchar(255) default null, dataset int default null, executionDate timestamp DEFAULT null, clusterURL varchar(255) default null, figures JSON DEFAULT NULL, df JSON DEFAULT NULL)")
else:
bd.execSQL("create table if not exists executions (idExecution int primary key AUTO_INCREMENT, name varchar(255) default null, description varchar(2000) default null, task varchar(255) default null, taskType varchar(255) default null, dataset int default null, executionDate datetime DEFAULT null, clusterURL varchar(255) default null, figures JSON DEFAULT NULL, df JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
bd.execSQL("create table if not exists EXECUTION (idExecution int primary key AUTO_INCREMENT, name varchar(255) default null, description varchar(2000) default null, task varchar(255) default null, taskType varchar(255) default null, dataset int default null, executionDate datetime DEFAULT null, clusterURL varchar(255) default null, figures JSON DEFAULT NULL, df JSON DEFAULT NULL) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci")
result = bd.selectFromSQL(
'select d.iddataset as "idDataset", d.name as name, count(*) as count from executions e inner join datasets d on d.iddataset=e.dataset group by d.iddataset')
'select d.iddataset as "idDataset", d.name as name, count(*) as count from EXECUTION e inner join DATASET d on d.iddataset=e.dataset group by d.iddataset')
for res in result:
res['executions'] = bd.selectFromSQL(
'select idexecution as "idExecution", name, description, task, tasktype as "taskType", executiondate as "executionDate" from executions where dataset='+str(res['idDataset']))
'select idexecution as "idExecution", name, description, task, tasktype as "taskType", executiondate as "executionDate" from EXECUTION where dataset='+str(res['idDataset']))
return {
"status": True,
"data": result
@@ -77,7 +78,7 @@ def getExecutions():
async def deleteDataset(request: Request):
data = await request.json()
idExecution = data["idExecution"]
bd.execSQL("delete from executions where idExecution="+str(idExecution))
bd.execSQL("delete from EXECUTION where idExecution="+str(idExecution))
return {
"status": True,
}
@@ -88,8 +89,12 @@ async def getExecution(request: Request):
data = await request.json()
idExecution = data["idExecution"]
result = bd.selectFromSQL(
'select clusterurl as "clusterURL", figures, df from executions where idExecution='+str(idExecution))[0]
'select clusterurl as "clusterURL", figures, df from EXECUTION where idExecution='+str(idExecution))[0]
return {
"status": True,
"data": result
}
def default(o):
    if isinstance(o, (datetime.date, datetime.datetime)):
        return o.isoformat()
    # Raise for anything unhandled; an implicit `return None` would make
    # json.dumps silently emit null for unknown types.
    raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable")
\ No newline at end of file
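For reference, the helper above plugs into json.dumps like this (output shown as a comment):

json.dumps({"executionDate": datetime.date(2021, 5, 1)}, default=default)
# -> '{"executionDate": "2021-05-01"}'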
@@ -66,21 +66,15 @@ class LSTModel():
self.set_train_test_predictors(df)
return df
def series_to_supervised(self, df, response="totalBuildingEnergy", predictors=["totalBuildingEnergy"], n_in=1, n_out=1, dropnan=True):
cols = []
# input sequence (t-n, ... t-1)
for i in range(n_in, 0, -1):
cols.append(df[predictors].shift(i))
# forecast sequence (t, t+1, ... t+n)
for i in range(0, n_out):
cols.append(df[response].shift(-i))
# put it all together
agg = pd.concat(cols, axis=1)
# drop rows with NaN values
if dropnan:
agg.dropna(inplace=True)
# Converts data into [Previous Values] -> [Response Value] windows
def to_sequences(self, df, window_size, steps_predict, response="totalBuildingEnergy", predictors=["totalBuildingEnergy"]):
X=[]
y=[]
for t in range(df.shape[0]-(window_size + steps_predict - 1)):
X.append(df[predictors].iloc[t:(t+window_size)].values)
y.append(df[response].iloc[(t+window_size):(t+window_size+steps_predict)].values)
return (np.array(agg.iloc[:, 0:-n_out]), np.array(agg.iloc[:, -n_out:]))
return(np.array(X),np.array(y))
# transform series into train and test sets for supervised learning
def prepare_data(self, train, test):
@@ -88,63 +82,27 @@ class LSTModel():
predictors = self.params["predictors"]
window_size = self.params["window_size"]
steps_predict = self.params["steps_predict"]
XT, YT = self.series_to_supervised(
train, "totalBuildingEnergy", predictors, window_size, steps_predict, True)
xt, yt = self.series_to_supervised(
test, "totalBuildingEnergy", predictors, window_size, steps_predict, True)
XT, YT = self.to_sequences(train, window_size, steps_predict, ["totalBuildingEnergy"], predictors)
xt, yt = self.to_sequences(test, window_size, steps_predict, ["totalBuildingEnergy"], predictors)
return(XT, YT, xt, yt)
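to_sequences turns a time-ordered frame into overlapping windows: each X sample holds window_size rows of the predictor columns and each y sample the next steps_predict response values. Below is a self-contained rerun of the same loop on toy data (note that prepare_data passes the response as a one-element list, which is why y keeps a trailing axis):

import numpy as np
import pandas as pd

df = pd.DataFrame({"totalBuildingEnergy": [10.0, 11.0, 12.0, 13.0, 14.0, 15.0]})
window_size, steps_predict = 3, 2
predictors = response = ["totalBuildingEnergy"]

X, y = [], []
for t in range(df.shape[0] - (window_size + steps_predict - 1)):
    X.append(df[predictors].iloc[t:t + window_size].values)                             # 3 lagged rows
    y.append(df[response].iloc[t + window_size:t + window_size + steps_predict].values)  # next 2 values

X, y = np.array(X), np.array(y)
print(X.shape, y.shape)  # (2, 3, 1) (2, 2, 1): 2 windows, 3 lags / 2 horizon steps, 1 column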
# Build model
def build_model(self, X_train, y_train, new_set_up):
X_train = X_train.reshape(X_train.shape[0], 1, X_train.shape[1])
if new_set_up["activation"] == "leakyrelu":
def build_model(self, X_train, y_train, X_test, y_test, new_set_up):
y_train = y_train.reshape(y_train.shape[0], 1, y_train.shape[1])
y_test = y_test.reshape(y_test.shape[0], 1, y_test.shape[1])
model = Sequential()
model.add(LSTM(new_set_up["numNeurons"][0], return_sequences=True, input_shape=(
X_train.shape[1], X_train.shape[2])))
model.add(LeakyReLU(alpha=0.03))
model.add(LSTM(new_set_up["numNeurons"][0], return_sequences=True, input_shape=(X_train.shape[1], X_train.shape[2])))
model.add(Dropout(new_set_up["dropout"]))
model.add(LSTM(new_set_up["numNeurons"][1]))
model.add(LeakyReLU(alpha=0.03))
model.add(Dropout(new_set_up["dropout"]))
model.add(Dense(y_train.shape[1]))
elif new_set_up["activation"] == "tanh":
model = Sequential()
model.add(LSTM(new_set_up["numNeurons"][0], return_sequences=True, input_shape=(
X_train.shape[1], X_train.shape[2])))
model.add(Dropout(new_set_up["dropout"]))
model.add(LSTM(new_set_up["numNeurons"][1]))
model.add(Dropout(new_set_up["dropout"]))
model.add(Dense(y_train.shape[1]))
model.add(Dense(y_test.shape[2]))
model.compile(optimizer="adam", loss='mse')
model.fit(X_train, y_train, epochs=new_set_up["epochs"], batch_size=new_set_up["batch_size"],
model.compile(optimizer="adam", loss='mse', metrics=['accuracy'])
history = model.fit(X_train, y_train, epochs=new_set_up["epochs"], batch_size=new_set_up["batch_size"],
shuffle=False, verbose=new_set_up["verbose"], validation_split=new_set_up["validation_split"])
plot_model(model, to_file='./public/model_plot.png',
show_shapes=True, show_layer_names=True)
return model
# make one forecast with an LSTM,
def forecast_lstm(self, model, X):
# reshape input pattern to [samples, timesteps, features]
X = X.reshape(X.shape[0], 1, X.shape[1])
# make forecast
forecast = model.predict(X)
# convert to array
return [x for x in forecast[0, :]]
# inverse data transform on forecasts
def inverse_transform(self, values, train_std, train_mean):
inverted = []
steps_predict = self.params["steps_predict"]
for i in range(len(values)):
# create array from forecast
if(len(values[i]) == steps_predict):
inv = values[i][steps_predict - 1]
inv = inv * train_std + train_mean
# store
inverted.append(inv)
return inverted
plot_model(model, to_file='./public/model_plot.png', show_shapes=True, show_dtype=False, show_layer_names=False)
return ({"model": model, "history": history.history })
def standardizeTrainTest(self, train, test, attr):
train_mean = train[attr].mean()
@@ -153,68 +111,69 @@ class LSTModel():
test[attr] = (test[attr]-train_mean)/train_std
return train, test, train_mean[attr[0]], train_std[attr[0]]
def test_model_multiple_pred(self, df, new_set_up):
train_split = df.loc[self.params["train_index"]]
test_split = df.loc[self.params["test_index"]]
# Multi-step prediction
def test_model_multi_step(self, df, normalize, setUp):
train_split = df.loc[self.params["train_index"]].copy()
test_split = df.loc[self.params["test_index"]].copy()
testmean = np.mean(test_split["totalBuildingEnergy"])
train, test, train_mean, train_std = self.standardizeTrainTest(
train_split, test_split, attr=["totalBuildingEnergy"])
if(normalize == True):
train, test, train_mean, train_std = self.standardizeTrainTest(train_split, test_split, attr=["totalBuildingEnergy"])
trainX, trainY, testX, testY = self.prepare_data(train, test)
trainX = np.asarray(trainX).astype(np.float32)
trainY = np.asarray(trainY).astype(np.float32)
testX = np.asarray(testX).astype(np.float32)
testY = np.asarray(testY).astype(np.float32)
model = self.build_model(trainX, trainY, new_set_up)
testX = testX.reshape(testX.shape[0], 1, testX.shape[1])
predictions = []
for i in range(len(testX)):
# make forecast
forecast = self.forecast_lstm(model, testX[i])
# store the forecast
predictions.append(forecast)
predictions = self.inverse_transform(
predictions, train_std, train_mean)
y = self.inverse_transform(testY, train_std, train_mean)
return ({"predictions": predictions, "y": y, "XY": trainY, "cvrmse": (mean_squared_error(predictions, y)**.5)/testmean,
"rmse": mean_squared_error(predictions, y)**.5, "mae": mean_absolute_error(predictions, y)})
# Only one prediction from test data
else:
trainX, trainY, testX, testY = self.prepare_data(train_split, test_split)
modelHistory = self.build_model(trainX, trainY, testX, testY, setUp)
model = modelHistory['model']
history = modelHistory['history']
input = testX[0].reshape(1, self.params["window_size"], testX[0].shape[1])
predictions = model.predict(input, verbose=0)
if(normalize == True):
predictions = [x * train_std + train_mean for x in predictions[0]]
y = [x[0] * train_std + train_mean for x in testY[0]]
else:
predictions = [x for x in predictions[0]]
y = [x[0] for x in testY[0]]
return ({"predictions": predictions, "y": y, "cvrmse": (mean_squared_error(predictions, y)**.5)/testmean,
"rmse": mean_squared_error(predictions, y)**.5, "mae": mean_absolute_error(predictions, y),
"history": history})
def test_model(self, df, new_set_up):
train_split = df.loc[self.params["train_index"]]
test_split = df.loc[self.params["test_index"]]
# One step prediction
def test_model(self, df, normalize, setUp):
train_split = df.loc[self.params["train_index"]].copy()
test_split = df.loc[self.params["test_index"]].copy()
testmean = np.mean(test_split["totalBuildingEnergy"])
train, test, train_mean, train_std = self.standardizeTrainTest(
train_split, test_split, attr=["totalBuildingEnergy"])
if(normalize == True):
train, test, train_mean, train_std = self.standardizeTrainTest(train_split, test_split, attr=["totalBuildingEnergy"])
trainX, trainY, testX, testY = self.prepare_data(train, test)
trainX = np.asarray(trainX).astype(np.float32)
trainY = np.asarray(trainY).astype(np.float32)
testX = np.asarray(testX).astype(np.float32)
testY = np.asarray(testY).astype(np.float32)
model = self.build_model(trainX, trainY, new_set_up)
testX = testX.reshape(testX.shape[0], 1, testX.shape[1])
predictions = self.forecast_lstm(model, testX[0])
predictions = [x * train_std + train_mean for x in predictions]
y = [x * train_std + train_mean for x in testY[0]]
return ({"predictions": predictions, "y": y, "XY": trainY, "cvrmse": (mean_squared_error(predictions, y)**.5)/testmean,
"rmse": mean_squared_error(predictions, y)**.5, "mae": mean_absolute_error(predictions, y)})
else:
trainX, trainY, testX, testY = self.prepare_data(train_split, test_split)
modelHistory = self.build_model(trainX, trainY, testX, testY, setUp)
model = modelHistory['model']
history = modelHistory['history']
predictions = []
predictions = model.predict(testX)
if(normalize == True):
predictions = [x[0] * train_std + train_mean for x in predictions]
y = [x[0][0] * train_std + train_mean for x in testY]
else:
predictions = [x[0] for x in predictions]
y = [x[0][0] for x in testY]
return ({"predictions": predictions, "y": y, "cvrmse": (mean_squared_error(predictions, y)**.5)/testmean,
"rmse": mean_squared_error(predictions, y)**.5, "mae": mean_absolute_error(predictions, y), "history": history})
def exec_model(self, df):
exectime = None
t = time.time()
if(self.params['steps_predict'] == 1):
results = self.test_model_multiple_pred(df, self.params)
results = self.test_model(df, True, self.params)
else:
results = self.test_model(df, self.params)
dates = df.loc[self.params['test_index']
]["Timestamp"][:len(results['predictions'])]
results = self.test_model_multi_step(df, True, self.params)
dates = df.loc[self.params['test_index']]["Timestamp"][:len(results['predictions'])]
exectime = time.time()-t
conf = {
"1": {
@@ -261,4 +220,7 @@ class LSTModel():
"y": results['y'],
"dates": [d for d in dates]
}
if(self.params['validation_split'] != 0):
response["loss"] = results['history']['loss']
response["val_loss"] = results['history']['val_loss']
return response
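The loss and val_loss series attached to the response come from the Keras History object that build_model now returns alongside the model; Keras only records a val_loss entry when validation is enabled, which is why the guard above checks validation_split first. A minimal sketch of that contract, with hypothetical loss values:

validation_split = 0.2  # mirrors params['validation_split'] in this diff
results = {"history": {"loss": [0.42, 0.31, 0.27],        # one value per epoch
                       "val_loss": [0.45, 0.36, 0.33]}}   # hypothetical numbers
response = {}
if validation_split != 0:
    response["loss"] = results["history"]["loss"]
    response["val_loss"] = results["history"]["val_loss"]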
@@ -137,30 +137,11 @@ export class GraphComponent implements OnInit {
},
scales: {
x: {
type: 'time',
type: 'time'
},
xAxes: [{
ticks: {
autoSkip: true
},
display: true,
gridLines: {
display: true
},
stacked: true
}],
yAxes: [
{
display: true,
ticks: {
beginAtZero: true
},
gridLines: {
display: true
},
stacked: false
y: {
offset: true
}
],
}
}
this.chart = new Chart(ch, {
@@ -11,7 +11,7 @@
<select [(ngModel)]="model.task" (change)="resetModelData()">
<option [value]=""></option>
<option value="clustering">Clustering</option>
<!-- <option value="prediction">Predicción</option> -->
<option value="prediction">Predicción</option>
</select>
</div>
<div class="filter-area">
@@ -70,7 +70,7 @@ export class ModelComponent implements OnInit {
batch_size: 32,
dropout: 0.3,
verbose: 0,
validation_split: 0,
validation_split: 0.2,
activation: "tanh",
integrationType: "d",
train_start_date: "21/9/2020 0:00:00",
@@ -139,6 +139,7 @@ export class ModelComponent implements OnInit {
this.loading = false;
this.executed = true;
if (result['status']) {
result['data'] = JSON.parse(result['data']);
if(this.model.algorithm == 'LSTM') {
this.results = result['data']['info']
this.modelImg = environment.api + "/" + result['data']['model_estructure_URL'];
@@ -60,6 +60,10 @@ export class NewMonthComponent implements OnInit {
uploadFile(event) {
let file = event.target.files[0];
let ext = file.name.split('.').pop(); // last segment, so names containing dots still work
if(ext != 'xlsx' && ext != 'xls') { // the original '||' test was always true, rejecting every file
alertify.notify('La extensión del archivo no está permitida');
return;
}
this.loadingText = "Cargando datos...";
this.loading = true;
this.dataService.uploadFile(file).subscribe(
@@ -54,6 +54,9 @@ export class RunsComponent implements OnInit {
}
deleteExecution(idExecution, index) {
alertify.confirm('Borrado de ejecución', 'Se va a borrar la ejecución seleccionada. ¿Desea continuar?',
() => {
this.loading = true;
this.dataService.deleteExecution(idExecution).subscribe(result => {
if (result['status']) {
let execIndex = this.allExecutions[index].executions.findIndex(e => e.idExecution == idExecution);
@@ -64,9 +67,15 @@ export class RunsComponent implements OnInit {
}
alertify.success("Ejecución borrada correctamente");
}
alertify.confirm().destroy();
this.loading = false;
}, error => {
alertify.error("Se ha producido un error al borrar la ejecución");
alertify.confirm().destroy();
this.loading = false;
})
}, () => {
}).setting({ 'onclosing': () => { } }).set('labels', { ok: 'Aceptar', cancel: 'Cancelar' });
}
closeExecution() {