import configparser
import json

import boto3
import pandas as pd

config = configparser.ConfigParser()
with open('dwh.cfg') as cfg_file:
    config.read_file(cfg_file)
KEY = config.get('AWS','KEY')
SECRET = config.get('AWS','SECRET')
DWH_IAM_ROLE_NAME = config.get('CLUSTER', 'DWH_IAM_ROLE_NAME')
DWH_CLUSTER_TYPE = config.get('CLUSTER', 'DWH_CLUSTER_TYPE')
DWH_NUM_NODES = config.get('CLUSTER', 'DWH_NUM_NODES')
DWH_NODE_TYPE = config.get('CLUSTER', 'DWH_NODE_TYPE')
DWH_CLUSTER_IDENTIFIER = config.get('CLUSTER', 'DWH_CLUSTER_IDENTIFIER')
DB_NAME = config.get('CLUSTER', 'DB_NAME')
DB_USER = config.get('CLUSTER', 'DB_USER')
DB_PASSWORD = config.get('CLUSTER', 'DB_PASSWORD')
DB_PORT = config.get('CLUSTER', 'DB_PORT')
ARN = config.get('IAM_ROLE', 'ARN')
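The code above assumes a dwh.cfg file next to the notebook containing the sections and keys read here; a minimal sketch of its shape (placeholder/example values, never commit real credentials):

[AWS]
KEY=<aws_access_key_id>
SECRET=<aws_secret_access_key>

[CLUSTER]
DWH_IAM_ROLE_NAME=dwhRole
DWH_CLUSTER_TYPE=multi-node
DWH_NUM_NODES=4
DWH_NODE_TYPE=dc2.large
DWH_CLUSTER_IDENTIFIER=dwhcluster
DB_NAME=sparkifydb
DB_USER=<master_username>
DB_PASSWORD=<master_password>
DB_PORT=5439

[IAM_ROLE]
ARN=<filled in with the role ARN after step 1.3>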
ec2 = boto3.resource('ec2',
                     region_name="us-west-2",
                     aws_access_key_id=KEY,
                     aws_secret_access_key=SECRET
                     )

s3 = boto3.resource('s3',
                    region_name="us-west-2",
                    aws_access_key_id=KEY,
                    aws_secret_access_key=SECRET
                    )

iam = boto3.client('iam',
                   region_name="us-west-2",
                   aws_access_key_id=KEY,
                   aws_secret_access_key=SECRET
                   )

redshift = boto3.client('redshift',
                        region_name="us-west-2",
                        aws_access_key_id=KEY,
                        aws_secret_access_key=SECRET
                        )
from botocore.exceptions import ClientError
# 1.1 Create the IAM role that allows Redshift to call AWS services (S3) on our behalf
try:
    print("1.1 Creating a new IAM Role")
    dwhRole = iam.create_role(
        Path='/',
        RoleName=DWH_IAM_ROLE_NAME,
        Description="Allows Redshift clusters to call AWS services on your behalf.",
        AssumeRolePolicyDocument=json.dumps(
            {'Statement': [{'Action': 'sts:AssumeRole',
                            'Effect': 'Allow',
                            'Principal': {'Service': 'redshift.amazonaws.com'}}],
             'Version': '2012-10-17'})
    )
except Exception as e:
    print(e)
print("1.2 Attaching Policy")
iam.attach_role_policy(RoleName=DWH_IAM_ROLE_NAME,
                       PolicyArn="arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess"
                       )['ResponseMetadata']['HTTPStatusCode']
print("1.3 Get the IAM role ARN")
roleArn = iam.get_role(RoleName=DWH_IAM_ROLE_NAME)['Role']['Arn']
print(roleArn)
try:
    response = redshift.create_cluster(
        # Hardware (cluster sizing)
        ClusterType=DWH_CLUSTER_TYPE,
        NodeType=DWH_NODE_TYPE,
        NumberOfNodes=int(DWH_NUM_NODES),

        # Identifiers & credentials
        DBName=DB_NAME,
        ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,
        MasterUsername=DB_USER,
        MasterUserPassword=DB_PASSWORD,

        # Role (grants the cluster read access to S3)
        IamRoles=[roleArn]
    )
except Exception as e:
    print(e)
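Cluster creation is asynchronous, so the status has to be polled until it reads 'available'. One way to block on that is boto3's built-in Redshift waiter (a sketch; re-running the describe_clusters cell below until ClusterStatus flips works just as well):

# Poll describe_clusters under the hood until the cluster reaches 'available'
waiter = redshift.get_waiter('cluster_available')
waiter.wait(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)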
def prettyRedshiftProps(props):
    """Show the interesting cluster properties as a two-column DataFrame."""
    pd.set_option('display.max_colwidth', None)  # -1 is deprecated in recent pandas
    keysToShow = ["ClusterIdentifier", "NodeType", "ClusterStatus", "MasterUsername",
                  "DBName", "Endpoint", "NumberOfNodes", "VpcId"]
    x = [(k, v) for k, v in props.items() if k in keysToShow]
    return pd.DataFrame(data=x, columns=["Key", "Value"])
myClusterProps = redshift.describe_clusters(ClusterIdentifier=DWH_CLUSTER_IDENTIFIER)['Clusters'][0]
prettyRedshiftProps(myClusterProps)
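The endpoint and role ARN printed below are pulled out of the same cluster properties once the cluster is available; a sketch of that step, assuming the standard describe_clusters response fields:

DWH_ENDPOINT = myClusterProps['Endpoint']['Address']
DWH_ROLE_ARN = myClusterProps['IamRoles'][0]['IamRoleArn']
print("DWH_ENDPOINT :: ", DWH_ENDPOINT)
print("DWH_ROLE_ARN :: ", DWH_ROLE_ARN)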
DWH_ENDPOINT :: dwhcluster.cnaqoizhiv4v.us-west-2.redshift.amazonaws.com
DWH_ROLE_ARN :: arn:aws:iam::783882260760:role/dwhRole
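The duplicate-rule message below is produced by the step that opens the cluster port (DB_PORT, 5439) to inbound TCP traffic on the cluster's VPC security group; a sketch of that step, assuming the default security group is the first one on the VPC (0.0.0.0/0 is wide open and should be narrowed outside of a throwaway exercise):

try:
    vpc = ec2.Vpc(id=myClusterProps['VpcId'])
    defaultSg = list(vpc.security_groups.all())[0]
    defaultSg.authorize_ingress(
        GroupName=defaultSg.group_name,
        CidrIp='0.0.0.0/0',
        IpProtocol='TCP',
        FromPort=int(DB_PORT),
        ToPort=int(DB_PORT)
    )
except Exception as e:
    print(e)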
An error occurred (InvalidPermission.Duplicate) when calling the AuthorizeSecurityGroupIngress operation: the specified rule "peer: 0.0.0.0/0, TCP, from port: 5439, to port: 5439, ALLOW" already exists
{'ResponseMetadata': {'RequestId': 'd17000e5-e7d6-4237-8331-adba620db8ae',
'HTTPStatusCode': 200,
'HTTPHeaders': {'x-amzn-requestid': 'd17000e5-e7d6-4237-8331-adba620db8ae',
'content-type': 'text/xml',
'content-length': '200',
'date': 'Sun, 19 Jul 2020 20:39:48 GMT'},
'RetryAttempts': 0}}
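The connection lines below come from the ipython-sql extension; a sketch of how the connection string is typically assembled from the config values and the endpoint extracted above:

%load_ext sql
conn_string = "postgresql://{}:{}@{}:{}/{}".format(DB_USER, DB_PASSWORD, DWH_ENDPOINT, DB_PORT, DB_NAME)
%sql $conn_string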
'Connected: idelfonso@sparkifydb'
* postgresql://idelfonso:***@dwhcluster.cnaqoizhiv4v.us-west-2.redshift.amazonaws.com:5439/sparkifydb
0 rows affected.
* postgresql://idelfonso:***@dwhcluster.cnaqoizhiv4v.us-west-2.redshift.amazonaws.com:5439/sparkifydb
postgresql://idelfonso:***@dwhcluster.cxna1iqaysmu.us-east-2.redshift.amazonaws.com:5439/sparkifydb
0 rows affected.