Initialize a SQLAlchemy database engine

def initialize_db(config):
    """Build a SQLAlchemy engine from a configuration mapping.

    ``config`` must supply "driver", "username", "password", "hostname",
    "database", "encoding" and "echo" keys (see DATABASE_ENGINE below).
    The port is left as None so the driver's default is used.
    Returns the created engine.
    """
    connection_url = URL(
        config["driver"],
        config["username"],
        config["password"],
        config["hostname"],
        None,  # port: fall back to the driver default
        config["database"],
        {'charset': 'utf8'})

    # Stringify the URL object before handing it to create_engine;
    # works around an issue with sqlite URLs.
    return create_engine(
        str(connection_url),
        encoding=config["encoding"],
        echo=config["echo"])

def insert_into_database():
    """Open a database connection and bind a MetaData object to it.

    NOTE(review): this snippet appears truncated -- the connection and
    metadata are created but nothing is inserted yet.
    """
    engine = initialize_db(settings.DATABASE_ENGINE)
    engine.echo = False  # silence SQL logging regardless of the config value
    conn = engine.connect()
    meta = MetaData(engine)


# Settings.py
# Add your credentials here before running.

# Connection settings consumed by initialize_db().  Fill in your own
# credentials; "encoding" and "echo" are passed straight to create_engine().
DATABASE_ENGINE = {
    "driver": "",            # SQLAlchemy driver name -- TODO confirm expected format (e.g. "mysql")
    "hostname": "",          # database server host
    "username": "",          # database user
    "password": "",          # database password
    "database": "",          # schema/database name
    "encoding": "latin1",    # client encoding handed to create_engine()
    "echo": False            # when True, log every generated SQL statement
}

Send email using a Python script

# The `email` package ships with the Python standard library; no pip install is needed.
# Don't save this file as email.py — that would shadow the built-in email module.

import os
import smtplib

from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate


def send_mail(send_from, send_to, subject, text, files=None,
              server="smtp.gmail.com", port=0,
              username="bbmaster@budgestudios.ca", password="buildmachine"):
    """Send a plain-text email over STARTTLS, optionally with attachments.

    send_from -- sender address placed in the From header.
    send_to   -- list of recipient addresses (asserted to be a list).
    subject   -- Subject header text.
    text      -- plain-text message body.
    files     -- optional iterable of file paths to attach (previously this
                 parameter was accepted but silently ignored).
    server    -- SMTP host; port=0 keeps smtplib's default port so the old
                 behavior is preserved (pass 587 for explicit submission).
    username/password -- SMTP credentials.
                 SECURITY: the defaults are hard-coded build-machine
                 credentials kept for backward compatibility; callers
                 should pass their own and the defaults should be removed.
    """
    assert isinstance(send_to, list)

    msg = MIMEMultipart()
    msg['Subject'] = subject
    msg['From'] = send_from
    msg['To'] = COMMASPACE.join(send_to)
    # Date header was missing even though formatdate was imported for it.
    msg['Date'] = formatdate(localtime=True)
    msg.attach(MIMEText(text))

    # Attach each requested file as an application part.
    for path in files or []:
        with open(path, "rb") as fh:
            part = MIMEApplication(fh.read(), Name=os.path.basename(path))
        part['Content-Disposition'] = 'attachment; filename="%s"' % os.path.basename(path)
        msg.attach(part)

    smtp = smtplib.SMTP(server, port)
    smtp.ehlo()
    smtp.starttls()
    smtp.login(username, password)
    smtp.sendmail(send_from, send_to, msg.as_string())
    smtp.quit()  # quit() sends QUIT before closing; close() just drops the socket

Using SQLAlchemy to create a table

def begin_import():
    """Load the downloaded Chartboost CSV into the database.

    Reads the UTF-16, tab-delimited export, maps each row onto a
    ChartboostCampaignAppMetric and merges it into the session so rows with
    the same composite primary key are updated rather than duplicated.
    Commits on success; rolls back and re-raises on any failure.
    """
    csv_data = csv.reader(codecs.open(csv_file, 'rb', 'utf-16'), delimiter='\t', quotechar='|')
    column_names = next(csv_data, None)  # skip the header row

    # Create the session outside the try block: if Session() itself raised,
    # the old code's finally clause hit a NameError on `session`.
    session = Session()
    try:
        for row in csv_data:
            # The export carries no platform column; append the one requested.
            row.append(current_platform)
            session_row = ChartboostCampaignAppMetric()

            session_row.Date = datetime.datetime.strptime(row[0].strip('"'), "%Y-%m-%d").date()
            session_row.ToCampaignName = row[1]
            session_row.ToCampaignID = row[2]
            session_row.ToAppName = row[3]
            session_row.ToAppID = row[4]
            session_row.ToAppBundle = row[5]
            session_row.FromCampaignName = row[6]
            session_row.FromCampaignID = row[7]
            session_row.FromAppName = row[8]
            session_row.FromAppID = row[9]
            session_row.FromAppBundle = row[10]
            session_row.CampaignType = row[11]
            session_row.Role = row[12]
            session_row.AdType = row[13]
            session_row.Impressions = try_parse_int(row[14])
            session_row.Clicks = try_parse_int(row[15])
            session_row.Installs = try_parse_int(row[16])
            session_row.CTR = try_parse_float(row[17])
            session_row.IR = try_parse_float(row[18])
            session_row.MoneyEarned = try_parse_float(row[19])
            session_row.eCPMEarned = try_parse_float(row[20])
            session_row.MoneySpent = try_parse_float(row[21])
            session_row.eCPMSpent = try_parse_float(row[22])
            session_row.CompletedView = try_parse_int(row[23])
            session_row.Platform = row[24]

            try:
                # merge() performs an upsert keyed on the composite primary key.
                session.merge(session_row)
            except SQLAlchemyError as e:
                # Best-effort: log the bad row and keep importing the rest.
                log.exception("Failed to merge %s." % (e))

        session.commit()
    except:
        # Roll back on any failure (including KeyboardInterrupt) and re-raise.
        # The finally clause closes the session exactly once; the old code
        # closed it on the error path AND in finally.
        session.rollback()
        raise
    finally:
        session.close()


class ChartboostCampaignAppMetric(Base):
    """Declarative ORM model for one row of the Chartboost
    "campaign by apps" export.

    The table uses a wide composite primary key (date + campaign/app ids +
    campaign type/role/ad type + platform) so that session.merge() in
    begin_import() updates existing rows instead of inserting duplicates.
    """
    __tablename__ = 'ChartboostCampaignByApps'

    # --- Composite primary-key columns: together they identify one row ---
    Date = Column(DateTime, primary_key=True)
    ToCampaignName = Column(String(100))
    ToCampaignID =  Column(String(100), primary_key=True)
    ToAppName = Column(String(100))
    ToAppID =  Column(String(100),primary_key=True)
    ToAppBundle =  Column(String(100))
    FromCampaignName = Column(String(100))
    FromCampaignID =  Column(String(100),primary_key=True)
    FromAppName =  Column(String(100))
    FromAppID =  Column(String(100),primary_key=True)
    FromAppBundle =  Column(String(100))
    CampaignType =  Column(String(100),primary_key=True)
    Role =  Column(String(100),primary_key=True)
    AdType =  Column(String(100),primary_key=True)
    # --- Metric columns: counters and monetary values, defaulting to 0 ---
    Impressions =  Column(BigInteger(),default = 0)
    Clicks =  Column(BigInteger(),default = 0)
    Installs =  Column(BigInteger(),default = 0)
    CTR =  Column(Numeric(18, 2),default = 0)
    IR =  Column(Numeric(18, 2),default = 0)
    MoneyEarned =  Column(Numeric(18, 2),default = 0)
    eCPMEarned =  Column(Numeric(18, 2),default = 0)
    MoneySpent =  Column(Numeric(18, 2),default = 0)
    eCPMSpent =  Column(Numeric(18, 2),default = 0)
    CompletedView =  Column(BigInteger(),default = 0)
    # Platform is appended to each CSV row by begin_import(); part of the key.
    Platform =  Column(String(100),primary_key=True)

Python script to request a URL, download the CSV, and parse it with the csv reader

# Chartboost API endpoints and the local path the exported CSV is saved to.
jobID_url = 'https://analytics.chartboost.com/v3/metrics/jobs/'  # poll/download a finished export job
campaign_by_apps_url = 'https://analytics.chartboost.com/v3/metrics/campaign?'  # start a "campaign by apps" export
csv_file = 'campaign_by_apps.csv'  # local download target, parsed by begin_import()

def initialize_api_request(star_date, end_date, appid, api_token, platform):
    global current_platform
    current_platform = platform  

    platform = urllib.quote(platform)
    cleanup()
    print '[*] Initializing API request for chartboost campaign by apps for platform %s' % current_platform
    request_url = campaign_by_apps_url + 'dateMin='+ str(star_date) +'&dateMax=' + str(end_date) + '&groupBy=app&platform=' + platform + '&userId=' + appid +'&userSignature=' + api_token
    try:
        response = urllib2.urlopen(request_url)
    except urllib2.HTTPError as e:
        print '[*] Connection failed because of error code : ' + str(e.code) + ', Retrying Connection...'
        time.sleep(10)
        initialize_api_request(star_date, end_date, appid, api_token, current_platform)
    except urllib2.URLError as e:
        print '[*] Connection failed because of URLError : ' + str(e)
    else:
        response_json = json.loads(response.read())
        print '[*] Job ID successfully obtained!'
        jobID = response_json["jobId"]
        download_csv_file(jobID)

def download_csv_file(jobID):
    # It may take upto 1 minute for the file to be available
    print '[*] Starting download...'
    time.sleep(60)

    csv_dowload_url = jobID_url + jobID
    try:
        response = urllib2.urlopen(csv_dowload_url)
    except urllib2.HTTPError as e:
        print '[*] Download failed because of error code : ' + str(e.code) + ', Retrying download...'
        time.sleep(10)
        cleanup()
        download_csv_file(jobID)
    except urllib2.URLError as e:
        print '[*] Download failed because of URLError : ' + str(e)
    else:
        with open(csv_file, 'wb') as f:
            f.write(response.read())
        print '[*] Download successful!'
        with open(csv_file, 'rb') as csvfile:
            try:
                dialect = csv.Sniffer().sniff(csvfile.read(1024))
                csvfile.seek(0)
                csvfile.close()
                print '[*] CSV file is valid'
                begin_import()
            except Exception, e:   
                print '[*] CSV file is invalid because: ' + str(e) + ', Retrying download...'
                cleanup()
                download_csv_file(jobID)

def cleanup():
    print '[*] Begin cleanup'
    if os.path.isfile(csv_file):
        print '[*] CSV file exist, destroying it'
        os.remove(csv_file)
    else:
        print '[*] CSV not found, attempting to fetch a new one from the server'

def begin_import():
    """Skeleton importer: iterate the downloaded CSV row by row.

    NOTE(review): the original loop body contained only a comment, which is
    a syntax error in Python (a suite cannot be empty); `pass` added so the
    snippet at least parses.
    """
    csv_data = csv.reader(codecs.open(csv_file, 'rb', 'utf-16'), delimiter='\t', quotechar='|')
    column_names = next(csv_data, None)  # skip the first row as it is the heading
    for row in csv_data:
        # TODO: add each row to the database
        pass
Merge two SQL tables without duplicates

-- Copy rows from AAA into BBB, skipping any row whose (col1, col2, col3)
-- combination already exists in BBB.  "etc." stands in for the remaining
-- columns of your schema -- this is a template, not runnable as-is.
INSERT INTO BBB(id, timestamp, mycount, col1, col2, col3, etc.)
SELECT id, timestamp, mycount, col1, col2, col3, etc.
   FROM AAA
   WHERE
       -- Anti-join: only insert rows with no matching row already in BBB.
       NOT EXISTS(SELECT NULL FROM BBB oldb WHERE
          oldb.col1 = AAA.col1
          AND oldb.col2 = AAA.col2
          AND oldb.col3 = AAA.col3

       )

Add columns as needed to the NOT EXISTS clause.

Source: http://stackoverflow.com/questions/9747337/merging-content-of-two-tables-without-duplicating-content

Python progress bar

import urllib2, sys

# Progress.py

def chunk_report(bytes_so_far, chunk_size, total_size):
    """Print a carriage-return progress line for a download in progress.

    bytes_so_far -- bytes received so far.
    chunk_size   -- unused; kept so the signature matches the report-hook
                    contract used by chunk_read().
    total_size   -- expected total byte count.

    Emits a trailing newline once bytes_so_far reaches total_size.
    """
    if total_size <= 0:
        # Unknown or empty size: a percentage is undefined (the old code
        # raised ZeroDivisionError here).
        return
    percent = round(float(bytes_so_far) / total_size * 100, 2)
    sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" %
                     (bytes_so_far, total_size, percent))

    if bytes_so_far >= total_size:
        sys.stdout.write('\n')

def chunk_read(response, chunk_size=8192, report_hook=None):
    """Read *response* to exhaustion in chunk_size pieces and return the body.

    response    -- object offering read(n) and info().getheader('Content-Length').
    chunk_size  -- bytes requested per read.
    report_hook -- optional callable invoked as
                   report_hook(bytes_so_far, chunk_size, total_size)
                   after each chunk (e.g. chunk_report above).
    """
    total_size = int(response.info().getheader('Content-Length').strip())
    bytes_so_far = 0
    chunks = []

    while 1:
        chunk = response.read(chunk_size)
        if not chunk:
            break

        # Collect whole chunks; the old `data += chunk` extended the list one
        # character at a time.
        chunks.append(chunk)
        bytes_so_far += len(chunk)
        if report_hook:
            report_hook(bytes_so_far, chunk_size, total_size)

    return "".join(chunks)

if __name__ == '__main__':
    # Demo: download a 50 MB test file while reporting progress on stdout.
    site= "http://download.thinkbroadband.com/50MB.zip"
    # Browser-like request headers; some hosts reject the default urllib2
    # User-Agent.
    hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
       'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
       'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
       'Accept-Encoding': 'none',
       'Accept-Language': 'en-US,en;q=0.8',
       'Connection': 'keep-alive'}
    req = urllib2.Request(site, headers=hdr)
    response = urllib2.urlopen(req);
    # chunk_report prints the rolling progress line for each chunk read.
    chunk_read(response, report_hook=chunk_report)
    print "done"

Source : http://stackoverflow.com/questions/5783517/downloading-progress-bar-urllib2-python