Commit c0b14010 authored by Zoltan Mathe

Merge branch 'sqlalchemy' into 'devel'

Moved RMS db to sqlalchemy

See merge request !240
parents 39e6cd84 f6886a05
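
For orientation before the diff: the change replaces the generic `self._query( <operation>, <table>, locals() )` calls with direct calls to an SQLAlchemy-backed `self.rmsDB`. A minimal sketch of that delegation pattern follows; the body of `_prepare` is not shown in this commit, so what it does here is an assumption about how `locals()` is cleaned before reaching the DB layer.

# Hypothetical sketch of the client-side pattern introduced in this commit.
# The body of _prepare is an assumption: the commit only shows it being called,
# so here it simply drops `self` and unset (None) parameters before the dict
# reaches the SQLAlchemy-backed DB object.
class ResourceManagementClientSketch( object ):

  def __init__( self, rmsDB ):
    self.rmsDB = rmsDB

  def _prepare( self, params ):
    # assumed behaviour: keep only the parameters that were actually set
    params.pop( 'self', None )
    return dict( ( key, value ) for key, value in params.items() if value is not None )

  def selectJobAccountingCache( self, name = None, lastCheckTime = None ):
    # same shape as the methods in the diff, trimmed to two columns for brevity
    return self.rmsDB.select( 'JobAccountingCache', self._prepare( locals() ) )
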
@@ -113,7 +113,7 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
def selectMonitoringTest( self, metricName = None, serviceURI = None,
siteName = None, serviceFlavour = None,
metricStatus = None, summaryData = None,
timestamp = None, lastCheckTime = None, meta = None ):
timestamp = None, lastCheckTime = None ):
"""
Gets from MonitoringTest all rows that match the parameters given.
@@ -134,19 +134,16 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
timestamp of the test
**lastCheckTime** - `[, datetime, list]`
last time it was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
"""
# Unused argument
return self._query( 'select', 'MonitoringTest', locals() )
return self.rmsDB.select( 'MonitoringTest', self._prepare(locals()) )
def deleteMonitoringTest( self, metricName = None, serviceURI = None,
siteName = None, serviceFlavour = None,
metricStatus = None, summaryData = None,
timestamp = None, lastCheckTime = None, meta = None ):
timestamp = None, lastCheckTime = None ):
"""
Deletes from MonitoringTest all rows that match the parameters given.
@@ -167,14 +164,11 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
timestamp of the test
**lastCheckTime** - `[, datetime, list]`
last time it was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
"""
# Unused argument
return self._query( 'delete', 'MonitoringTest', locals() )
return self.rmsDB.delete( 'MonitoringTest', self._prepare(locals()) )
def addOrModifyMonitoringTest( self, metricName, serviceURI, siteName,
serviceFlavour, metricStatus, summaryData,
@@ -203,9 +197,8 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
:return: S_OK() || S_ERROR()
"""
# Unused argument
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'MonitoringTest', locals() )
return self.rmsDB.addOrModify( 'MonitoringTest', self._prepare(locals()) )
##############################################################################
# JOB ACCOUNTING CACHE METHODS
@@ -213,7 +206,7 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
def selectJobAccountingCache( self, name = None, checking = None, completed = None,
done = None, failed = None, killed = None,
matched = None, running = None, stalled = None,
lastCheckTime = None, meta = None ):
lastCheckTime = None ):
"""
Selects from JobAccountingCache all rows that match the parameters given.
@@ -238,18 +231,17 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
number of stalled jobs
**lastCheckTime** - `datetime`
last time it was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
"""
return self._query( 'select', 'JobAccountingCache', locals() )
return self.rmsDB.select( 'JobAccountingCache', self._prepare(locals()) )
def deleteJobAccountingCache( self, name = None, checking = None, completed = None,
done = None, failed = None, killed = None,
matched = None, running = None, stalled = None,
lastCheckTime = None, meta = None ):
lastCheckTime = None ):
"""
Deletes from JobAccountingCache all rows that match the parameters given.
@@ -274,17 +266,16 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
number of stalled jobs
**lastCheckTime** - `datetime`
last time it was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
"""
return self._query( 'delete', 'JobAccountingCache', locals() )
return self.rmsDB.delete( 'JobAccountingCache', self._prepare(locals()) )
def addOrModifyJobAccountingCache( self, name = None, checking = None, completed = None,
done = None, failed = None, killed = None,
matched = None, running = None, stalled = None,
lastCheckTime = None, meta = None ):
lastCheckTime = None ):
"""
Using `name` to query the database, decides whether to insert or update the
table.
@@ -310,15 +301,11 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
number of stalled jobs
**lastCheckTime** - `datetime`
last time it was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
"""
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'JobAccountingCache', locals() )
return self.rmsDB.addOrModify( 'JobAccountingCache', self._prepare(locals()) )
##############################################################################
# PILOT ACCOUNTING CACHE METHODS
@@ -342,14 +329,12 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
number of failed pilots
**lastCheckTime** - `datetime`
last time it was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
"""
return self._query( 'select', 'PilotAccountingCache', locals() )
return self.rmsDB.select( 'PilotAccountingCache', self._prepare(locals()) )
def deletePilotAccountingCache( self, name = None, aborted = None, deleted = None,
done = None, failed = None, lastCheckTime = None,
meta = None ):
@@ -369,13 +354,11 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
number of failed pilots
**lastCheckTime** - `datetime`
last time it was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
"""
return self._query( 'delete', 'PilotAccountingCache', locals() )
return self.rmsDB.delete( 'PilotAccountingCache', self._prepare(locals()) )
def addOrModifyPilotAccountingCache( self, name = None, aborted = None, deleted = None,
done = None, failed = None, lastCheckTime = None,
meta = None ):
@@ -396,24 +379,94 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
number of failed pilots
**lastCheckTime** - `datetime`
last time it was checked
**meta** - `[, dict]`
meta-data for the MySQL query. It will be filled automatically with the\
`table` key and the proper table name.
:return: S_OK() || S_ERROR()
"""
meta = { 'onlyUniqueKeys' : True }
return self._query( 'addOrModify', 'PilotAccountingCache', locals() )
return self.rmsDB.addOrModify( 'PilotAccountingCache', self._prepare(locals()) )
#..................
# ENVIRONMENT CACHE methods
def selectEnvironmentCache( self, hashKey = None, environment = None,
siteName = None, arguments = None,
dateEffective = None, lastCheckTime = None,
meta = None ):
"""
Gets from EnvironmentCache all rows that match the parameters given.
:Parameters:
**hashKey** - `[, string, list]`
hash of the environment
**environment** - `[, string, list]`
string with the environment dump
**siteName** - `[, string, list]`
name of the site
**arguments** - `[, string, list]`
SetupProject arguments
**dateEffective** - `[, datetime, list]`
creation time of the hash
**lastCheckTime** - `[, datetime, list]`
last time it was checked
:return: S_OK() || S_ERROR()
"""
return self.rmsDB.select( 'EnvironmentCache', self._prepare(locals()) )
def deleteEnvironmentCache( self, hashKey = None, environment = None,
siteName = None, arguments = None,
dateEffective = None, lastCheckTime = None,
meta = None ):
"""
Deletes from EnvironmentCache all rows that match the parameters given.
:Parameters:
**hashKey** - `[, string, list]`
hash of the environment
**environment** - `[, string, list]`
string with the environment dump
**siteName** - `[, string, list]`
name of the site
**arguments** - `[, string, list]`
SetupProject arguments
**dateEffective** - `[, datetime, list]`
creation time of the hash
**lastCheckTime** - `[, datetime, list]`
last time it was checked
:return: S_OK() || S_ERROR()
"""
return self.rmsDB.delete( 'EnvironmentCache', self._prepare(locals()) )
def addOrModifyEnvironmentCache( self, hashKey, environment, siteName,
arguments, dateEffective, lastCheckTime ):
"""
Using `hashKey` to query the database, decides whether to insert or update
the table.
:Parameters:
**hashKey** - `string`
hash of the environment
**environment** - `string`
string with the environment dump
**siteName** - `string`
name of the site
**arguments** - `string`
SetupProject arguments
**dateEffective** - `datetime`
creation time of the hash
**lastCheckTime** - `datetime`
last time it was checked
:return: S_OK() || S_ERROR()
"""
def getSLSStorage( self, site = None, token = None,
availability = None, refreshPeriod = None,
validityDuration = None, totalSpace = None,
guaranteedSpace = None, freeSpace = None, meta = None ):
return self.rmsDB.addOrModify( 'EnvironmentCache', self._prepare(locals()) )
# Deprecation warning
gLogger.warn( "DEPRECATED: use getSEStorageSpace" )
return self._query( 'select', 'SLSStorage', locals() )
#.............................................................................
#
def getSEStorageSpace( self, seName ):
""" getSEStorageSpace
@@ -477,7 +530,7 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
# def insertHammerCloudTest( self, testID, siteName, resourceName, testStatus,
# submissionTime, startTime, endTime, counterTime,
# agentStatus, formerAgentStatus, counter, meta = None ):
# agentStatus, formerAgentStatus, counter ):
# """
# Inserts on HammerCloud a new row with the arguments given.
#
@@ -514,7 +567,7 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
# return self._query( 'insert', 'HammerCloudTest', locals() )
# def updateHammerCloudTest( self, testID, siteName, resourceName, testStatus,
# submissionTime, startTime, endTime, counterTime,
# agentStatus, formerAgentStatus, counter, meta = None ):
# agentStatus, formerAgentStatus, counter ):
# """
# Updates on HammerCloud a new row with the arguments given.
#
@@ -553,7 +606,7 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
# def selectHammerCloudTest( self, testID = None, siteName = None, resourceName = None,
# testStatus = None, submissionTime = None, startTime = None,
# endTime = None, counterTime = None, agentStatus = None,
# formerAgentStatus = None, counter = None, meta = None ):
# formerAgentStatus = None, counter = None ):
# """
# Gets from HammerCloud all rows that match the parameters given.
#
@@ -592,7 +645,7 @@ class ResourceManagementClient( DIRACResourceManagementClient ):
# def deleteHammerCloudTest( self, testID = None, siteName = None, resourceName = None,
# testStatus = None, submissionTime = None, startTime = None,
# endTime = None, counterTime = None, agentStatus = None,
# formerAgentStatus = None, counter = None, meta = None ):
# formerAgentStatus = None, counter = None ):
# """
# Deletes from HammerCloud all rows that match the parameters given.
#
......
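
As a usage note (not part of the diff): callers of this client are unaffected by the SQLAlchemy move, since the methods keep the usual DIRAC S_OK/S_ERROR return convention. A hedged example; the import path, the no-argument constructor, and the site name are assumptions made only for illustration.

# Hedged usage sketch. Assumes the standard DIRAC result dict: 'OK' plus
# 'Value' on success or 'Message' on failure. Import path is assumed.
from LHCbDIRAC.ResourceStatusSystem.Client.ResourceManagementClient import ResourceManagementClient

client = ResourceManagementClient()
result = client.selectJobAccountingCache( name = 'LCG.CERN.ch' )  # hypothetical site name
if result[ 'OK' ]:
  for row in result[ 'Value' ]:
    print( row )
else:
  print( result[ 'Message' ] )
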
@@ -5,8 +5,10 @@
'''
from DIRAC.ResourceStatusSystem.DB.ResourceManagementDB import \
ResourceManagementDB as DIRACResourceManagementDB
from DIRAC import S_OK, S_ERROR
from DIRAC.ResourceStatusSystem.DB.ResourceManagementDB import ResourceManagementDB as DIRACResourceManagementDB
from sqlalchemy.dialects.mysql import DOUBLE, INTEGER, TIMESTAMP, TINYINT, BIGINT
from sqlalchemy import Table, Column, MetaData, String, DateTime, exc, Text, text, BLOB
__RCSID__ = "$Id$"
@@ -22,86 +24,96 @@ class ResourceManagementDB( DIRACResourceManagementDB ):
- SLSStorage
'''
_tablesDB = DIRACResourceManagementDB._tablesDB
_tablesDB[ 'EnvironmentCache' ] = { 'Fields' : { 'HashKey' : 'VARCHAR(64) NOT NULL',
'Environment' : 'TEXT',
'SiteName' : 'VARCHAR(64) NOT NULL',
'Arguments' : 'VARCHAR(512) NOT NULL',
'DateEffective' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'},
'PrimaryKey' : [ 'HashKey' ]}
_tablesDB[ 'HammerCloudTest' ] = { 'Fields' : {'TestID' : 'INT UNSIGNED',
'SiteName' : 'VARCHAR(64) NOT NULL',
'ResourceName' : 'VARCHAR(64) NOT NULL',
'TestStatus' : 'VARCHAR(16)',
'SubmissionTime' : 'DATETIME NOT NULL',
'StartTime' : 'DATETIME',
'EndTime' : 'DATETIME',
'CounterTime' : 'DATETIME',
'AgentStatus' : 'VARCHAR(255) NOT NULL DEFAULT "Unspecified"',
'FormerAgentStatus' : 'VARCHAR(255) NOT NULL DEFAULT "Unspecified"',
'Counter' : 'INT NOT NULL DEFAULT 0' },
'PrimaryKey' : [ 'SubmissionTime' ]}
_tablesDB[ 'MonitoringTest' ] = { 'Fields' : { 'MetricName' : 'VARCHAR(128) NOT NULL',
'ServiceURI' : 'VARCHAR(128) NOT NULL',
'SiteName' : 'VARCHAR(64) NOT NULL',
'ServiceFlavour' : 'VARCHAR(64) NOT NULL',
'MetricStatus' : 'VARCHAR(512) NOT NULL',
'SummaryData' : 'BLOB NOT NULL',
'Timestamp' : 'DATETIME NOT NULL',
'LastCheckTime' : 'DATETIME NOT NULL'},
'PrimaryKey' : [ 'MetricName', 'ServiceURI' ]}
_tablesDB[ 'JobAccountingCache' ] = { 'Fields' :
{'Name' : 'VARCHAR(64) NOT NULL',
'Checking' : 'DOUBLE NOT NULL DEFAULT 0',
'Completed' : 'DOUBLE NOT NULL DEFAULT 0',
'Done' : 'DOUBLE NOT NULL DEFAULT 0',
'Failed' : 'DOUBLE NOT NULL DEFAULT 0',
'Killed' : 'DOUBLE NOT NULL DEFAULT 0',
'Matched' : 'DOUBLE NOT NULL DEFAULT 0',
'Running' : 'DOUBLE NOT NULL DEFAULT 0',
'Stalled' : 'DOUBLE NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'},
'PrimaryKey' : [ 'Name' ]
}
_tablesDB[ 'PilotAccountingCache' ] = { 'Fields' :
{ 'Name' : 'VARCHAR(64) NOT NULL',
'Aborted' : 'DOUBLE NOT NULL DEFAULT 0',
'Deleted' : 'DOUBLE NOT NULL DEFAULT 0',
'Done' : 'DOUBLE NOT NULL DEFAULT 0',
'Failed' : 'DOUBLE NOT NULL DEFAULT 0',
'LastCheckTime' : 'DATETIME NOT NULL'},
'PrimaryKey' : [ 'Name' ]
}
# TABLES THAT WILL EVENTUALLY BE DELETED
_tablesDB[ 'SLST1Service' ] = { 'Fields' :
{ 'Site' : 'VARCHAR(64) NOT NULL',
'System' : 'VARCHAR(32) NOT NULL',
'Availability' : 'TINYINT UNSIGNED NOT NULL',
'TimeStamp' : 'TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP',
'Version' : 'VARCHAR(32)',
'ServiceUptime' : 'INT UNSIGNED',
'HostUptime' : 'INT UNSIGNED',
'Message' : 'TEXT' },
'PrimaryKey' : [ 'Site', 'System' ]
}
_tablesDB[ 'SLSLogSE' ] = { 'Fields' :
{ 'Name' : 'VARCHAR(32)',
'Availability' : 'TINYINT UNSIGNED NOT NULL',
'TimeStamp' : 'TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP',
'ValidityDuration' : 'VARCHAR(32) NOT NULL',
'DataPartitionUsed' : 'TINYINT UNSIGNED',
'DataPartitionTotal' : 'BIGINT UNSIGNED'},
'PrimaryKey' : [ 'Name' ]
}
#_tablesLike = DIRACResourceManagementDB._tablesLike
#_likeToTable = DIRACResourceManagementDB._likeToTable
def createTables( self ):
EnvironmentCache = Table( 'EnvironmentCache', self.metadata,
Column( 'DateEffective', DateTime, nullable = False ),
Column( 'LastCheckTime', DateTime, nullable = False ),
Column( 'SiteName', String( 64 ), nullable = False ),
Column( 'Environment', Text ),
Column( 'HashKey', String( 64 ), nullable = False, primary_key = True ),
Column( 'Arguments', String( 512 ), nullable = False ),
mysql_engine = 'InnoDB' )
HammerCloudTest = Table( 'HammerCloudTest', self.metadata,
Column( 'Counter', INTEGER, nullable = False, server_default = '0' ),
Column( 'TestStatus', String( 16 ) ),
Column( 'TestID', INTEGER ),
Column( 'ResourceName', String( 64 ), nullable = False ),
Column( 'AgentStatus', String( 255 ), nullable = False, server_default = "Unspecified" ),
Column( 'EndTime', DateTime ),
Column( 'SiteName', String( 64 ), nullable = False ),
Column( 'FormerAgentStatus', String( 255 ), nullable = False, server_default = "Unspecified" ),
Column( 'StartTime', DateTime ),
Column( 'SubmissionTime', DateTime, nullable = False, primary_key = True ),
Column( 'CounterTime', DateTime ),
mysql_engine = 'InnoDB' )
MonitoringTest = Table( 'MonitoringTest', self.metadata,
Column( 'ServiceFlavour', String( 64 ), nullable = False ),
Column( 'ServiceURI', String( 128 ), nullable = False, primary_key = True ),
Column( 'LastCheckTime', DateTime, nullable = False ),
Column( 'MetricStatus', String( 512 ), nullable = False ),
Column( 'SiteName', String( 64 ), nullable = False ),
Column( 'Timestamp', DateTime, nullable = False ),
Column( 'SummaryData', BLOB, nullable = False ),
Column( 'MetricName', String( 128 ), nullable = False, primary_key = True ),
mysql_engine = 'InnoDB' )
JobAccountingCache = Table( 'JobAccountingCache', self.metadata,
Column( 'Failed', DOUBLE, nullable = False, server_default = '0' ),
Column( 'Running', DOUBLE, nullable = False, server_default = '0' ),
Column( 'Done', DOUBLE, nullable = False, server_default = '0' ),
Column( 'Name', String( 64 ), nullable = False, primary_key = True ),
Column( 'Stalled', DOUBLE, nullable = False, server_default = '0' ),
Column( 'Checking', DOUBLE, nullable = False, server_default = '0' ),
Column( 'Completed', DOUBLE, nullable = False, server_default = '0' ),
Column( 'Killed', DOUBLE, nullable = False ),
Column( 'LastCheckTime', DateTime, nullable = False ),
Column( 'Matched', DOUBLE, nullable = False, server_default = '0' ),
mysql_engine = 'InnoDB' )
PilotAccountingCache = Table( 'PilotAccountingCache', self.metadata,
Column( 'Name', String( 64 ), nullable = False, primary_key = True ),
Column( 'LastCheckTime', DateTime, nullable = False ),
Column( 'Deleted', DOUBLE, nullable = False, server_default = '0' ),
Column( 'Failed', DOUBLE, nullable = False, server_default = '0' ),
Column( 'Done', DOUBLE, nullable = False, server_default = '0' ),
Column( 'Aborted', DOUBLE, nullable = False, server_default = '0' ),
mysql_engine = 'InnoDB' )
# TABLES THAT WILL EVENTUALLY BE DELETED
SLST1Service = Table( 'SLST1Service', self.metadata,
Column( 'HostUptime', INTEGER ),
Column( 'Version', String( 32 ) ),
Column( 'ServiceUptime', INTEGER ),
Column( 'TimeStamp', TIMESTAMP, server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP')),
Column( 'Message', Text ),
Column( 'Availability', TINYINT, nullable = False ),
Column( 'Site', String( 64 ), nullable = False, primary_key = True ),
Column( 'System', String( 32 ), nullable = False, primary_key = True ),
mysql_engine = 'InnoDB' )
SLSLogSE = Table( 'SLSLogSE', self.metadata,
Column( 'DataPartitionTotal', BIGINT ),
Column( 'Name', String( 32 ), server_default = '0' ),
Column( 'TimeStamp', TIMESTAMP, server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP')),
Column( 'DataPartitionUsed', TINYINT ),
Column( 'ValidityDuration', String( 32 ), nullable = False ),
Column( 'Availability', TINYINT, nullable = False ),
mysql_engine = 'InnoDB' )
# create tables
try:
self.metadata.create_all( self.engine )
except exc.SQLAlchemyError as e:
self.log.exception( "createTables: unexpected exception", lException = e )
return S_ERROR( "createTables: unexpected exception %s" % e )
return S_OK()
#...............................................................................
#EOF
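
The commit does not include the DB-side `select` / `delete` / `addOrModify` implementations that the client now calls. Below is a minimal sketch, assuming SQLAlchemy 1.x Core is used against the tables registered in createTables(); the function name and signature are illustrative only, not the actual DIRAC code.

# Illustrative only, not the actual DIRAC implementation: build a SELECT on one
# of the tables registered in the MetaData, filtering on the supplied columns.
from sqlalchemy import select

def selectFromTable( engine, metadata, tableName, filters ):
  table = metadata.tables[ tableName ]
  query = select( [ table ] )          # SQLAlchemy 1.x Core style
  for column, value in filters.items():
    if isinstance( value, ( list, tuple ) ):
      query = query.where( table.c[ column ].in_( value ) )   # lists -> IN (...)
    else:
      query = query.where( table.c[ column ] == value )       # scalars -> equality
  with engine.connect() as conn:
    return [ dict( row ) for row in conn.execute( query ) ]
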
@@ -26,7 +26,7 @@ def initializeResourceManagementHandler( _serviceInfo ):
db = ResourceManagementDB()
# Regenerates DB tables if needed
db._checkTable()
db.createTables()
syncObject = Synchronizer.Synchronizer()
gConfig.addListenerToNewVersionEvent( syncObject.sync )
......
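
In the handler change above, `db._checkTable()` is replaced by `db.createTables()` at service initialisation. This stays safe across restarts because `MetaData.create_all()` only emits CREATE TABLE for tables that do not already exist (its `checkfirst` flag defaults to True). A minimal illustration, with a throwaway in-memory SQLite engine standing in for the MySQL backend:

# Minimal illustration of create_all() idempotency (SQLite stand-in engine).
from sqlalchemy import Column, DateTime, MetaData, String, Table, create_engine

engine = create_engine( 'sqlite://' )
metadata = MetaData()
Table( 'EnvironmentCache', metadata,
       Column( 'HashKey', String( 64 ), primary_key = True ),
       Column( 'SiteName', String( 64 ), nullable = False ),
       Column( 'LastCheckTime', DateTime, nullable = False ) )

metadata.create_all( engine )  # creates the table
metadata.create_all( engine )  # second call is a no-op: checkfirst is True by default
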