"""
Copyright (c) 2019 Nutanix Inc. All rights reserved.
Author: Tarun Mehta
Description: This class handles the registration of database.
This class initializes register database workflow and executes it.
"""
from abc import ABCMeta, abstractmethod
import traceback
from nutanix_era.[Link].era_logger import get_era_common_logger
from ...[Link] import ERADatabase
from ..[Link] import StepGenWorkflow
class RegisterDatabaseOrchestrator(StepGenWorkflow, metaclass=ABCMeta):
    """
    Handles the registration of single and clustered databases on a new or
    existing DB server.

    This is the base class; it executes the workflow defined by derived
    classes.

    NOTE(review): this file was recovered from a mangled source (wrapped
    statements, identifier placeholders, lost indentation). Reconstructed
    attribute names (``self.work``, ``self.logger``, ``self.dbservers``,
    ``self.clustered``, ``self.category``, ``self.time_machine_id``) should
    be confirmed against the original module.
    """

    def __init__(self, era_op, flattened_work, host_properties, logger,
                 stepgen_manager):
        """
        :type era_op: EraOperation
        :type flattened_work: dict (flattened work attributes)
        :type host_properties: dict
        :type logger: era_logger
        :type stepgen_manager: StepGenManager
        """
        super(RegisterDatabaseOrchestrator, self).__init__(logger,
                                                           stepgen_manager)
        self.era_op = era_op
        self.work = flattened_work
        self.host_properties = host_properties
        self.register_dbserver = False
        self.internal_operation = False
        self.app_info = None
        self.dbservers = []
        # Tag the common logger with the operation id when one is available.
        if "operationId" in self.work:
            self.common_logger = get_era_common_logger(
                operation_id=self.work["operationId"])
        else:
            self.common_logger = get_era_common_logger()
        # Work attributes arrive as strings; normalize booleans explicitly.
        self.clustered = str(self.work.get('clustered')).lower() == "true"
        self.database_name = self.work.get('database_name')  # DB name in DB server
        if not self.database_name:
            self.database_name = self.work.get('databaseName')  # DB name in ERA
        self.database_id = self.work.get('databaseId')
        self.log_driver_nx_cluster_id = self.work.get('logDriveNxClusterId')
        self.nx_cluster_id = self.work.get('nxClusterId')
        self.db_server_id = self.work.get('dbserverId')
        self.is_owner_era_server = str(
            self.work.get('isOwnerEraServer')).lower() == "true"
        self.register_dbserver = str(
            self.work.get('registerDbserver')).lower() == "true"
        self.internal_operation = str(
            self.work.get('internalOperation')).lower() == "true"
        self.skip_rollback = str(
            self.work.get('skipRollback')).lower() == "true"
        self.category = str(self.work.get('category')).lower()
        self.database_group = (self.category == "db_group" or
                               self.category == "db_group_dbserver")
        self.operation_owner_id = self.work.get('ownerId')
        self.era_database = None
        self.register_database_result = None
        self.db_server_metadata = None
        self.database_group_name = ""
        self.database_group_id = ""
        self.database_create_new_group = False
        self.auto_register_database = False
        self.register_all_database = False
        self.database_group_db_list = []
        self.database_group_db_id_list = []
        self.overall_database_group_db_list = []
        self.time_machine_id = self.work.get("timeMachineId", "")
        self.is_fci = False
        if self.database_group:
            self.update_database_group_info()
        # FCI (failover cluster instance) detection: either the discovered
        # cluster resource type or an explicit isFci flag in the work.
        if (self.work.get("cluster_resource_type") == "FCI" or
                str(self.work.get("isFci")).lower() == "true"):
            self.work["isFci"] = "true"
            self.is_fci = True
            self.work["sql_network_name"] = self.work.get(
                "cluster_resource_name")
def __enter__(self):
# Pre workflow steps
# Add this function in derived classes for derive class cleanup
[Link]("Enter RegisterDatabaseOrchestrator")
return self
def __exit__(self, exc_type, exc_val, exc_tb):
# Post workflow steps/cleanup
# Add this function in derived classes for derive class cleanup
[Link]("Exit RegisterDatabaseOrchestrator")
def __str__(self):
return str(self.__dict__)
def update_database_group_info(self):
# get databaseGroupInfo information :
database_group_info = {}
if [Link]("payload"):
database_group_info =
[Link]("payload").get("databaseGroupingInfo")
instance_names = []
recovery_models = []
if not database_group_info:
return
self.database_group_name =
str(database_group_info.get("newDatabaseGroupName"))
self.database_create_new_group = database_group_info.get("createNewGroup",
False)
self.database_group_id = database_group_info.get("databaseGroupId")
self.auto_register_database =
database_group_info.get("autoRegisterDatabases")
self.register_all_database =
database_group_info.get("registerAllDatabases", False)
self.database_name = self.database_group_name
databases = database_group_info.get("databases")
if not databases:
return
for database in databases:
self.database_group_db_id_list.append(([Link]("id")))
action_arguments = [Link]("actionArguments")
if action_arguments:
for prop in action_arguments:
if str([Link]("name")).lower() == "instance_name":
instance_name = [Link]("value")
if instance_name not in instance_names:
instance_names.append(instance_name)
if instance_names.__len__() > 1:
raise Exception("Group databases cannot be formed
between instances")
if str([Link]("name")).lower() == "recovery_model":
recovery_model = [Link]("value")
if recovery_model not in recovery_models:
recovery_models.append([Link]("value"))
if recovery_models.__len__() > 1:
raise Exception("Databases in the group cannot have
different recovery models")
if str([Link]("name")).lower() == "database_name":
self.database_group_db_list.append([Link]("value"))
self.overall_database_group_db_list.append([Link]("value"))
if self.database_group_id:
self.era_database = ERADatabase(id=self.database_group_id,
type="advanced", is_clone_app=False,
log_file_path=[Link]['LOG_DIRECTORY_PATH'],
log_file_name=[Link]['LOG_FILE_NAME'], logger=[Link],
fetch_info=True)
self.database_group_name = self.era_database.databaseName
self.database_name = self.database_group_name
for database in self.era_database.databases:
if [Link]("databaseName") not in
self.overall_database_group_db_list:
self.overall_database_group_db_list.append([Link]("databaseName"))
[Link](
"database_group_name {0}, database_create_new_group {1},
database_group_id {2}, database_group_list {3}, "
"overall_list {4}".format(self.database_group_name,
str(self.database_create_new_group),
self.database_group_id,
str(self.database_group_db_list),
str(self.overall_database_group_db_list)))
@abstractmethod
def initialize(self):
pass
def submit_sub_operation_for_database_registration(self, properties=None,
primary_dbserver=None):
#
# activate_database_interface
#
[Link]("start
{}:submit_sub_operation_for_database_registration".format(self.__class__.__name__))
nodes = []
for dbserver in [Link]:
node = {
"host_ip": dbserver.ip_address,
"dbserverId": dbserver.get_dbserver_id()
}
[Link](node)
parent_operation = {
"id": [Link]["operationId"],
"stepIndex": self.step_index
}
from ...[Link] import ERADatabase
self.era_database = ERADatabase(id=[Link]['databaseId'],
type="advanced", is_clone_app=False,
log_file_path=[Link]['LOG_DIRECTORY_PATH'],
log_file_name=[Link]['LOG_FILE_NAME'],
logger=[Link], fetch_info=True)
[Link]("associating database to logical cluster")
if properties:
self.era_database.set_properties(properties)
if str([Link]("cluster_resource_type")).lower() == "fci":
self.submit_sub_operation_for_fci_registration(parent_operation,
primary_dbserver)
return
self.register_database_result =
self.era_database.submit_database_activate_suboperation(nodes, [Link][
'databaseId'], parent_operation, clustered=[Link],
operation_owner_id=self.operation_owner_id)
def submit_sub_operation_for_fci_registration(self, parent_operation,
primary_dbserver):
#
# fci db registration
#
[Link]("start
{}:submit_sub_operation_for_fci_registration".format(self.__class__.__name__))
# In case of FCI we first register the FCI active node and later use the
properties from the active node as the
# properties for the passive node. hence the submit sub operation is split
into two different calls
nodes = []
node = {
"host_ip": primary_dbserver.ip_address,
"dbserverId": primary_dbserver.get_dbserver_id()
}
[Link](node)
[Link]("register fci active")
self.register_database_result =
self.era_database.submit_database_activate_suboperation(nodes, [Link][
'databaseId'], parent_operation, clustered=[Link],
operation_owner_id=self.operation_owner_id,
set_ready_state=False)
if not self.register_database_result["overallStatus"]:
error_message = self.register_database_result.get("errorMessage",
"Failed to register database")
raise Exception(error_message)
nodes = []
[Link]("register fci passive nodes")
for dbserver in [Link]:
if dbserver == primary_dbserver:
[Link]("skipping active")
continue
node = {
"host_ip": dbserver.ip_address,
"dbserverId": dbserver.get_dbserver_id()
}
[Link](node)
# register database result is being overwritten, as it is not being used in
the rollback, the merge of register
# results has not been done. if need be, merging or register result can be
done
self.register_database_result =
self.era_database.submit_database_activate_suboperation(nodes, [Link][
'databaseId'], parent_operation, clustered=[Link],
operation_owner_id=self.operation_owner_id,
is_pd_shared=True)
def activate_time_machine(self):
#
# Activate time machine of the database
#
[Link]("start
{}:activate_time_machine".format(self.__class__.__name__))
# Do not activate the time machine, if database_group_id is passed.
if self.database_group_id:
return
if not self.era_database:
self.era_database = ERADatabase(id=[Link]['databaseId'],
type="advanced", is_clone_app=False,
log_file_path=[Link]['LOG_DIRECTORY_PATH'],
log_file_name=[Link]['LOG_FILE_NAME'], logger=[Link],
fetch_info=True)
application_info = {
"id": [Link]['databaseId'],
"name": self.database_name
}
log_drive_mount_point = "/home/era/era_base/log_drive/winlogdrive_" + str(
RegisterDatabaseOrchestrator.convert_app_name_to_valid_dir_path(self.database_name)
)
if self.is_owner_era_server:
self.fetch_database_app_info()
basic_info = self.app_info["basic_info"]
db_size = basic_info["SIZE"]
db_size_unit = basic_info.get("SIZE_UNIT", "GB")
era_server_info = {
"mount_point": log_drive_mount_point,
"disk_size": [Link]["log_disk_size"],
"db_size": db_size,
"db_size_unit": db_size_unit,
"cloud_id_list": [Link]("pitrEnabledClusterIds"),
"log_drive_fs_type": "ntfs"
}
parent_operation = {
"id": [Link]["operationId"],
"stepIndex": self.step_index
}
status = self.era_database.time_machine_activate(self.is_owner_era_server,
parent_operation, application_info,
era_server_info,
operation_owner_id=self.operation_owner_id)
if not status:
raise Exception("Failed to activate the time machine of database.")
#
# Associating the database to all the DB servers so that clone into source
can directly be done
#
if not (self.database_group or str([Link]("isFci")).lower() ==
"true"):
self.associate_time_machine_with_dbservers()
#
# Save database properties
#
self.save_database_properties()
#
# Updating auto_register flag on the dbserver
#
self.update_auto_register_flag()
def update_auto_register_flag(self):
[Link]("Start update_auto_register_flag")
if self.auto_register_database:
[Link]("Set db_group_auto_registration as True")
# set dbserver property with auto-register = True.
from
nutanix_era.era_drivers.[Link] import
ERADbServer
dbserver = ERADbServer(id=self.db_server_id,
type="basic",
logger=[Link],
fetch_info=True)
properties = []
prop = {}
prop["name"] = "db_group_auto_registration"
prop["value"] = True
[Link](prop)
dbserver.set_properties(properties)
def associate_time_machine_with_dbservers(self):
[Link]("associating time machine with dbservers")
[Link]([Link])
time_machine_id = self.era_database.get_time_machine_object().id
import nutanix_era.common.mgmt_server.TimeMachineUtil as TimeMachineUtil
time_machine_util = [Link](None)
dbserver_id_list = []
for dbserver in [Link]:
[Link]("associating time machine " + str(time_machine_id) +
" with dbserver " + str(dbserver.ip_address))
dbserver_id_list.append(dbserver.get_dbserver_id())
# in case of register operation picked directly on dbserver, dbserver
object is not created
if not [Link]:
dbserver_id_list.append(self.db_server_id)
[Link]("associating time machine " + str(time_machine_id) +
" with dbserver " + str(self.db_server_id))
[Link]("dbserver ID list:" + str(dbserver_id_list))
try:
status = time_machine_util.associate_dbserver_with_time_machine(
id=time_machine_id, is_name=False,
dbserver_id_list=dbserver_id_list)
if not status:
return 0
except Exception as e:
[Link](str(str(e)))
[Link](traceback.format_exc())
[Link]("failed to associate time machine " +
str(time_machine_id) + " with dbservers " +
str(dbserver_id_list))
return 1
def save_database_properties(self):
#
# Save database properties
#
try:
self.set_database_info()
[Link]("Saving database properties...")
basic_info = self.app_info["basic_info"]
properties = []
for key in basic_info:
[Link]({"name": key, "value": basic_info[key]})
if [Link]("recovery_model"):
[Link]({"name": "recovery_model", "value":
[Link]["recovery_model"]})
if [Link]("era_manage_log"):
[Link]({"name": "era_manage_log", "value":
[Link]["era_manage_log"]})
[Link]({"name": "isFci", "value":
str([Link]("cluster_resource_type")).lower() == "fci"})
[Link](properties)
self.era_database.set_properties(properties)
[Link]("Updating database metrics...")
metrics_object = {"storage": {"size": basic_info['SIZE'], "usedSize":
basic_info['used_size'], "unit": basic_info['SIZE_UNIT']}}
self.era_database.update_database_metrics(metrics_object)
except Exception as ex:
[Link](traceback.format_exc())
[Link]("Failed to save database properties. Error: " +
str(str(ex)))
def set_database_info(self):
#
# Setting basic info to database info column.
# TODO - Need to revisit if we are okay to use database properties instead
of info. Keeping the code for
# future use
#
[Link]("Start set_database_info")
basic_info = self.app_info["basic_info"]
self.era_database.set_info(info=basic_info, secure_info=None)
def fetch_database_app_info(self):
#
# Fetch the app info json from ERA server. As part of Activate call app
info is loaded to ERA server.
#
[Link]("start fetch_database_app_info")
database_nodes = self.era_database.get_All_databaseNode()
first_database_node_id = database_nodes[0]["id"] # Get first node TODO get
MAX size of database for cluster db
[Link]("database node id : {}".format(first_database_node_id))
from ...[Link] import
ERADatabaseNode
database_node = ERADatabaseNode(id=first_database_node_id,
database_id=self.era_database.id,
type="advanced", fetch_info=False,
logger=[Link])
self.app_info = database_node.get_app_info_json()['info']
@staticmethod
def convert_app_name_to_valid_dir_path(app_name):
import re
app_name = [Link](r'[^a-zA-Z0-9._-]', "_", app_name)
return app_name