# Go to: https://www.quandl.com/
# Sign up / in with your email address and password

# Run Terminal on MacOS
#
########## Terminal

### TO UPDATE
cd "YOUR_WORKING_DIRECTORY"

########## python3
#
# Run python on Terminal
#
# python
python3 --version
#Python 3.8.2
python3
#
# a list of Python modules available (i.e. installed) on a machine
#help('modules')
# Please make sure that sys, pandas, csv, and numpy are installed. Also, quandl will be installed.
#
# Show where the modules are stored on my machine
import sys
print(sys.path)
# Please make sure that the following sys.path is included.
#'/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8'
quit()

########## Access the following Quandl's GitHub web page:
#https://github.com/quandl/quandl-python
# Clone or download > Download ZIP > then save the "quandl-python-master" folder in the Python library directory above. It goes like this:
#'/Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/quandl-python-master'

########## Terminal
python3 /Library/Frameworks/Python.framework/Versions/3.8/lib/python3.8/quandl-python-master/setup.py install

########## python3
#
python3

##### import quandl library and get sample data without authentication
import quandl

##### Usage Limits
# The Quandl Python module is free. If you would like to make more than 50 calls a day, however, you will need to create a free
# Quandl account and set your API key:
#
# Create or sign in to your account to check your API key:
# https://www.quandl.com/account/profile
#
### TO UPDATE
quandl.ApiConfig.api_key = "YOUR_KEY_HERE"

# Test: getting Quandl data
# US GDP from FRED
mydata = quandl.get("FRED/GDP")
mydata

# go to: https://www.quandl.com/search
#
# Filters
# - Free
#
# Asset Class
# - Equities
#
# Data Type
# - Price & Volumes
#
# Region
# - United States
#
# Select:
#NASDAQ OMX Global Index Data
#https://www.quandl.com/data/NASDAQOMX-NASDAQ-OMX-Global-Index-Data
#
# Select
#
#NASDAQ-100 Technology Sector Total Return (NTTR)
#https://www.quandl.com/data/NASDAQOMX/NTTR-NASDAQ-100-Technology-Sector-Total-Return-NTTR
# Press EXPORT DATA > Libraries: Python
# Copy
#quandl.get("NASDAQOMX/NTTR", authtoken="YOUR_KEY_HERE")
#
#NASDAQ-100 Ex-Tech Total Return (NXTR)
#https://www.quandl.com/data/NASDAQOMX/NXTR-NASDAQ-100-Ex-Tech-Total-Return-NXTR
# Press EXPORT DATA > Libraries: Python
# Copy
#quandl.get("NASDAQOMX/NXTR", authtoken="YOUR_KEY_HERE")
#
#NASDAQ Financial 100 (IXF)
#https://www.quandl.com/data/NASDAQOMX/IXF-NASDAQ-Financial-100-IXF
# Press EXPORT DATA > Libraries: Python
# Copy
#quandl.get("NASDAQOMX/IXF", authtoken="YOUR_KEY_HERE")
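Note: the quandl package is also published on PyPI, so depending on your environment you may be able to skip the manual ZIP download and setup.py step above and install it directly. This is an alternative route, not the one used in this post:

########## Terminal
pip3 install quandl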
# Run Terminal on MacOS
#
########## Terminal

### TO UPDATE
cd "YOUR_WORKING_DIRECTORY"

python3

# You need to save the following py files (e.g., fti01datay.py) in "YOUR_WORKING_DIRECTORY" before running this script.
import fti01datay
#import fti02param
from fti02param import *
#print(dma)
from fti03mu import *
from fti04yminusmu import *
from fti05sigma import *
from fti06corr import *
from fti07covftimsi import *
from fti08ftima import *
from fti09ftipctrank import *
from fti10msima import *
from fti11csi import *
from fti12csima import *
from fti13csipctrank import *
1. Getting index data and calculating returns y
##### import quandl library
import quandl
##### import csv library to write and read csv files
import csv
##### import pandas library
import pandas as pd

### TO UPDATE
quandl.ApiConfig.api_key = "YOUR_KEY_HERE"

##### Quandl data
# go to: https://www.quandl.com/search
#
# Filters
# - Free
#
# Asset Class
# - Equities
#
# Data Type
# - Price & Volumes
#
# Region
# - United States
#
# Select:
#NASDAQ OMX Global Index Data
#https://www.quandl.com/data/NASDAQOMX-NASDAQ-OMX-Global-Index-Data
#
# Then see each page of the following three indices:

#NASDAQ-100 Technology Sector Total Return (NTTR)
#https://www.quandl.com/data/NASDAQOMX/NTTR-NASDAQ-100-Technology-Sector-Total-Return-NTTR
# Press EXPORT DATA > Libraries: Python
# Copy
def NTTR():
    dt_NTTR = quandl.get("NASDAQOMX/NTTR")
    dt_NTTR['NTTR'] = dt_NTTR['Index Value'].pct_change()
    dt_NTTR['NTTR'].to_csv("NTTR.csv")

NTTR()

#NASDAQ-100 Ex-Tech Total Return (NXTR)
#https://www.quandl.com/data/NASDAQOMX/NXTR-NASDAQ-100-Ex-Tech-Total-Return-NXTR
# Press EXPORT DATA > Libraries: Python
# Copy
def NXTR():
    dt_NXTR = quandl.get("NASDAQOMX/NXTR")
    dt_NXTR['NXTR'] = dt_NXTR['Index Value'].pct_change()
    dt_NXTR['NXTR'].to_csv("NXTR.csv")

NXTR()

#NASDAQ Financial 100 (IXF)
#https://www.quandl.com/data/NASDAQOMX/IXF-NASDAQ-Financial-100-IXF
# Press EXPORT DATA > Libraries: Python
# Copy
def IXF():
    dt_IXF = quandl.get("NASDAQOMX/IXF")
    dt_IXF['IXF'] = dt_IXF['Index Value'].pct_change()
    dt_IXF['IXF'].to_csv("IXF.csv")

IXF()

##### merge several y data into one by using pandas.concat
NTTR = pd.read_csv('NTTR.csv', header=0)
NTTR = NTTR.set_index('Trade Date')
print(NTTR)

NXTR = pd.read_csv('NXTR.csv', header=0)
NXTR = NXTR.set_index('Trade Date')
print(NXTR)

IXF = pd.read_csv('IXF.csv', header=0)
IXF = IXF.set_index('Trade Date')
print(IXF)

y = pd.concat([NTTR, NXTR, IXF], axis=1, join='inner')
print(y)
y = y.dropna(how="any")
print(y)
y.to_csv("y.csv")
2. Setting parameters
# moving average days for asset mean returns (mu), standard deviations (sigma), and correlations (rho)
dma = (250 * 5)

# percentile rank: moving average lookback days for FTI
FTI_pct_rank_dma = 30

# percentile rank: time period of percentile rank calculation
FTI_pct_rank_dtp = (250 * 5)
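For orientation: at roughly 250 trading days per year, dma = 250 * 5 is about five years of history for the rolling means, volatilities, correlations, and covariances; FTI_pct_rank_dma = 30 is about six trading weeks of smoothing; and FTI_pct_rank_dtp is again about five years for the percentile-rank window. A quick check after importing fti02param:

print(dma)               # 1250 trading days, roughly five years
print(FTI_pct_rank_dma)  # 30 trading days, roughly six trading weeks
print(FTI_pct_rank_dtp)  # 1250 trading days, roughly five years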
3. Simple moving average of returns (mu)
fti03mu.py
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

y = pd.read_csv('y.csv')
print(y)
print(dma)

mu = pd.concat([y.iloc[:,0], y.rolling(window=dma).mean()], axis=1, join='inner')
print(mu)
mu = mu.dropna(how="any")
print(mu)
mu.to_csv("mu.csv", index=False)
4. y - mu
fti04yminusmu.py
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

y = pd.read_csv('y.csv', index_col=0)
mu = pd.read_csv('mu.csv', index_col=0)
print(y)
print(mu)

ymmu = y - mu
print(ymmu)
ymmu = ymmu.dropna(how="any")
print(ymmu)
ymmu.to_csv("ymmu.csv")
5. Sigma (volatility) σ
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

y = pd.read_csv('y.csv', index_col=0)
print(y)
print(dma)

sigma = y.rolling(window=dma).std()
print(sigma)
sigma = sigma.dropna(how="any")
print(sigma)
sigma.to_csv("sigma.csv")
6. Correlations of y
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

y = pd.read_csv('y.csv', index_col=0)
print(y)
print(dma)

corr = y.rolling(window=dma).corr()
print(corr)
corr = corr.dropna(how="any")
print(corr)
corr.to_csv("corr.csv")
7. Variance-Covariance Matrix, Financial Turbulence Index (FTI), and Magnitude Surprise Index (MSI)
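In my notation (a summary of what the scripts below compute, not a quotation from the reference), let y_t be the vector of the three index returns on day t, and let mu_t and Sigma_t be the rolling mean vector and covariance matrix over the previous dma days:

\mathrm{FTI}_t = (y_t - \mu_t)\,\Sigma_t^{-1}\,(y_t - \mu_t)^{\top}
\mathrm{MSI}_t = (y_t - \mu_t)\,\operatorname{diag}(\Sigma_t)^{-1}\,(y_t - \mu_t)^{\top}
\mathrm{CSI}_t = \mathrm{FTI}_t / \mathrm{MSI}_t

FTI is a Mahalanobis-distance-style measure of how unusual the day's returns are given recent volatilities and correlations. MSI keeps only the diagonal of the covariance matrix (volatilities only, correlations ignored), which is what np.triu(np.tril(...)) does in the code. CSI, computed in section 11, is their ratio.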
fti07covftimsi.py
##### import libraries
import pandas as pd
import numpy as np
import csv
from fti02param import *

y = pd.read_csv('y.csv', index_col=0)
ymmu = pd.read_csv('ymmu.csv', index_col=0)

cov = y.rolling(window=dma).cov()
#print(cov)
cov = cov.dropna(how="any")
#print(cov)
cov.to_csv("cov.csv")

#len(cov.columns)
#3

#cov[0:3]
#                      NTTR      NXTR       IXF
#Trade Date
#2009-08-20  NTTR  0.000452  0.000392  0.000470
#            NXTR  0.000392  0.000402  0.000473
#            IXF   0.000470  0.000473  0.000786

#np.linalg.inv(cov[0:3])
#array([[ 14403.24297589, -13429.93270314,   -521.2216743 ],
#       [-13429.93270314,  21077.19070866,  -4666.48714028],
#       [  -521.2216743 ,  -4666.48714028,   4394.71065572]])

#print(np.linalg.inv(cov[0:3]))
#[[ 14403.24297589 -13429.93270314   -521.2216743 ]
# [-13429.93270314  21077.19070866  -4666.48714028]
# [  -521.2216743   -4666.48714028   4394.71065572]]

#np.linalg.inv(cov[0:3]).shape
#(3, 3)

#np.array(ymmu[0:1])
#array([[0.00788763, 0.00722348, 0.00716573]])

#np.array(ymmu[0:1]).shape
#(1, 3)

#np.array(ymmu[1:2])
#array([[0.01311559, 0.01776113, 0.01831083]])

#np.array(ymmu[0:1]).T
#array([[0.00788763],
#       [0.00722348],
#       [0.00716573]])

#np.array(ymmu[0:1]).T.shape
#(3, 1)

#np.array(ymmu[0:1]) * np.linalg.inv(cov[0:3]) * np.array(ymmu[0:1]).T
#(1, 3)(3, 3)(3, 1)
#(1, 3)(3, 1)
#(1)

#np.array(ymmu[0:1]).dot(np.linalg.inv(cov[0:3]))

#np.array(ymmu[0:1]).dot(np.linalg.inv(cov[0:3])).dot(np.array(ymmu[0:1]).T)
#array([[0.14915166]])

#ymmu
#                NTTR      NXTR       IXF
#Trade Date
#2009-08-20  0.007888  0.007223  0.007166
#2009-08-21  0.013116  0.017761  0.018311
#2009-08-24 -0.008065 -0.002483 -0.016052
#2009-08-25  0.001166  0.008367  0.004676
#2009-08-26  0.002793 -0.002977  0.002221
#...              ...       ...       ...
#2020-04-28 -0.014821 -0.010789  0.007708
#2020-04-29  0.040584  0.018247  0.026535
#2020-04-30 -0.020283 -0.015237 -0.026131
#2020-05-01 -0.045328 -0.020033 -0.033132
#2020-05-05 -0.000719 -0.000370 -0.000112
#
#[2701 rows x 3 columns]

#cov['2009-08-20':'2009-08-20']
#                      NTTR      NXTR       IXF
#Trade Date
#2009-08-20  NTTR  0.000452  0.000392  0.000470
#            NXTR  0.000392  0.000402  0.000473
#            IXF   0.000470  0.000473  0.000786

#cov[0:3]
#                      NTTR      NXTR       IXF
#Trade Date
#2009-08-20  NTTR  0.000452  0.000392  0.000470
#            NXTR  0.000392  0.000402  0.000473
#            IXF   0.000470  0.000473  0.000786

#cov[3:6]
#                      NTTR      NXTR       IXF
#Trade Date
#2009-08-21  NTTR  0.000452  0.000392  0.000470
#            NXTR  0.000392  0.000402  0.000474
#            IXF   0.000470  0.000474  0.000786

#cov.index[0][0]
#'2009-08-20'
#cov.index[3][0]
#'2009-08-21'

#cov.index[0][0]
#'2009-08-20'
#np.array(ymmu[0:1]).dot(np.linalg.inv(cov[0:3])).dot(np.array(ymmu[0:1]).T)
#array([[0.14915166]])

#cov.index[3][0]
#'2009-08-21'
#np.array(ymmu[1:2]).dot(np.linalg.inv(cov[3:6])).dot(np.array(ymmu[1:2]).T)
#array([[1.05672594]])

#int(len(cov)/len(cov.columns))
#8103 / 3
#2701

with open('FTI.csv', 'w') as f:
    writer = csv.writer(f)
    writer.writerow(["Trade Date", "FTI"])

#tmp = [cov.index[0][0], float(np.array(ymmu[0:1]).dot(np.linalg.inv(cov[0:3])).dot(np.array(ymmu[0:1]).T))]
#tmp = [cov.index[3][0], float(np.array(ymmu[1:2]).dot(np.linalg.inv(cov[3:6])).dot(np.array(ymmu[1:2]).T))]
#tmp = [cov.index[6][0], float(np.array(ymmu[2:3]).dot(np.linalg.inv(cov[6:9])).dot(np.array(ymmu[2:3]).T))]

#for i in range(3):
#    print(i)
#
#0
#1
#2

for i in range(int(len(cov)/len(cov.columns))):
    tmp = [cov.index[i*len(cov.columns)][0],
           float(np.array(ymmu[i:i+1]).dot(np.linalg.inv(cov[i*len(cov.columns):(i+1)*len(cov.columns)])).dot(np.array(ymmu[i:i+1]).T))]
    #
    with open('FTI.csv', 'a') as f:
        writer = csv.writer(f)
        writer.writerow(tmp)

##### Magnitude Surprise Index (MSI)
with open('MSI.csv', 'w') as f2:
    writer = csv.writer(f2)
    writer.writerow(["Trade Date", "MSI"])

for i in range(int(len(cov)/len(cov.columns))):
    tmp2 = [cov.index[i*len(cov.columns)][0],
            float(np.array(ymmu[i:i+1]).dot(np.linalg.inv(np.triu(np.tril(cov[i*len(cov.columns):(i+1)*len(cov.columns)])))).dot(np.array(ymmu[i:i+1]).T))]
    #
    with open('MSI.csv', 'a') as f2:
        writer = csv.writer(f2)
        writer.writerow(tmp2)
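The loops above reopen FTI.csv and MSI.csv on every iteration, which is simple but slow for long histories. Below is a minimal in-memory sketch of the same per-date quadratic form. It assumes the cov and ymmu DataFrames defined above and the same row alignment the original loop relies on; the helper name surprise is mine, not part of the original scripts.

import numpy as np
import pandas as pd

def surprise(x_row, cov_block, diagonal_only=False):
    # x_row: (y - mu) for one date as a 1-D array; cov_block: that date's k x k covariance block
    s = np.diag(np.diag(cov_block)) if diagonal_only else np.asarray(cov_block)
    x = np.asarray(x_row, dtype=float).reshape(1, -1)
    return float(x @ np.linalg.inv(s) @ x.T)

k = len(cov.columns)            # number of assets (3)
n = len(cov) // k               # number of dates
dates = [cov.index[i * k][0] for i in range(n)]
fti = [surprise(ymmu.values[i], cov.values[i * k:(i + 1) * k]) for i in range(n)]
msi = [surprise(ymmu.values[i], cov.values[i * k:(i + 1) * k], diagonal_only=True) for i in range(n)]

idx = pd.Index(dates, name="Trade Date")
pd.DataFrame({"FTI": fti}, index=idx).to_csv("FTI.csv")
pd.DataFrame({"MSI": msi}, index=idx).to_csv("MSI.csv")

This writes each file once and produces the same "Trade Date,FTI" and "Trade Date,MSI" layout the later scripts read.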
8. Moving average of FTI (moving average time period in days: FTI_pct_rank_dma)
fti08ftima.py
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

FTI = pd.read_csv('FTI.csv', index_col=0)
#print(FTI)
print(FTI_pct_rank_dma)

FTIdma = FTI.rolling(window=FTI_pct_rank_dma).mean()
#print(FTIdma)
FTIdma = FTIdma.dropna(how="any")
print(FTIdma)
FTIdma.to_csv("FTIdma.csv")
9. FTI percentile rank (percentile rank time period: FTI_pct_rank_dtp)
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

FTIdma = pd.read_csv('FTIdma.csv', index_col=0)
#print(FTIdma)
#type(FTIdma)
#<class 'pandas.core.frame.DataFrame'>
#print(FTI_pct_rank_dtp)

FTIpctrank = FTIdma.rolling(FTI_pct_rank_dtp).apply(lambda x: pd.Series(x).rank().values[-1])/FTI_pct_rank_dtp
#print(FTIpctrank)
FTIpctrank = FTIpctrank.dropna(how="any")
#print(FTIpctrank)
FTIpctrank.to_csv("FTIpctrank.csv")
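Each value of FTIpctrank is the rank of the latest FTIdma observation within the trailing FTI_pct_rank_dtp observations, divided by the window length, so values near 1 mean the smoothed FTI is high relative to its own recent history. A tiny standalone illustration of that rolling-rank lambda (toy numbers, not project data):

import pandas as pd

s = pd.Series([3.0, 1.0, 2.0, 5.0, 4.0])
w = 5
pct = s.rolling(w).apply(lambda x: pd.Series(x).rank().values[-1]) / w
print(pct)
# The last value is 0.8: within the 5-observation window, 4.0 is the 4th smallest value, and 4/5 = 0.8.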
10. Moving average of MSI (moving average time period in days: FTI_pct_rank_dma)
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

MSI = pd.read_csv('MSI.csv', index_col=0)
#print(MSI)
#print(FTI_pct_rank_dma)

MSIdma = MSI.rolling(window=FTI_pct_rank_dma).mean()
#print(MSIdma)
MSIdma = MSIdma.dropna(how="any")
print(MSIdma)
MSIdma.to_csv("MSIdma.csv")
11. Correlation Surprise Index (CSI)
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

FTI = pd.read_csv('FTI.csv', index_col=0)
MSI = pd.read_csv('MSI.csv', index_col=0)
#print(MSI)

CSI = pd.concat([FTI, MSI], axis=1, join='inner')
print(CSI)
CSI = CSI.dropna(how="any")
print(CSI)
CSI['CSI'] = CSI['FTI'] / CSI['MSI']
print(CSI)
CSI.to_csv("CSI.csv")

#print(FTI_pct_rank_dma)
#CSIdma = CSI.rolling(window=FTI_pct_rank_dma).mean()
#print(CSIdma)
#CSIdma = CSIdma.dropna(how="any")
#print(CSIdma)
#CSIdma.to_csv("CSIdma.csv")
12. Moving average of CSI (moving average time period in days: FTI_pct_rank_dma)
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

CSI = pd.read_csv('CSI.csv', index_col=0)
#print(CSI)

#CSI = pd.concat([FTI, MSI], axis=1, join='inner')
#print(CSI)
#CSI = CSI.dropna(how="any")
#print(CSI)
#CSI['CSI'] = CSI['FTI'] / CSI['MSI']
print(CSI)
#CSI.to_csv("CSI.csv")

#print(FTI_pct_rank_dma)
CSIdma = CSI['CSI'].rolling(window=FTI_pct_rank_dma).mean()
#print(CSIdma)
CSIdma = CSIdma.dropna(how="any")
#print(CSIdma)
CSIdma.to_csv("CSIdma.csv")
13. CSI percentile rank (percentile rank time period: FTI_pct_rank_dtp)
##### import libraries
import pandas as pd
import numpy as np
from fti02param import *

CSIdma = pd.read_csv('CSIdma.csv', index_col=0)
#print(CSIdma)
#type(CSIdma)
#<class 'pandas.core.frame.DataFrame'>
#print(FTI_pct_rank_dtp)

CSIpctrank = CSIdma.rolling(FTI_pct_rank_dtp).apply(lambda x: pd.Series(x).rank().values[-1])/FTI_pct_rank_dtp
#print(CSIpctrank)
CSIpctrank = CSIpctrank.dropna(how="any")
#print(CSIpctrank)
CSIpctrank.to_csv("CSIpctrank.csv")
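To eyeball the results, here is a minimal plotting sketch. It assumes matplotlib is installed and that the FTIpctrank.csv and CSIpctrank.csv files above were produced; it is not part of the original scripts.

import pandas as pd
import matplotlib.pyplot as plt

fti_rank = pd.read_csv('FTIpctrank.csv', index_col=0)
csi_rank = pd.read_csv('CSIpctrank.csv', index_col=0)

fig, ax = plt.subplots()
ax.plot(pd.to_datetime(fti_rank.index), fti_rank.iloc[:, 0], label='FTI percentile rank')
ax.plot(pd.to_datetime(csi_rank.index), csi_rank.iloc[:, 0], label='CSI percentile rank')
ax.set_ylabel('Percentile rank')
ax.legend()
plt.show()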
Reference
https://link.springer.com/article/10.1057/jam.2013.27