Using Vector AutoRegression (VAR) in statsmodels - python-2.7

I am trying to run a VAR model with the following script:
import pandas as pd
import statsmodels
import statsmodels.tsa.api as sm
from statsmodels.tsa.api import VAR

tsBitcoin_frame = tsBitcoin.to_frame()
tsSP500_frame = tsSP500.to_frame()
forVar = [tsBitcoin_frame, tsSP500_frame]
dataForVar = pd.concat(forVar, axis=1)
model = VAR(dataForVar)
results = model.fit(2)
results.summary()
However, Python gives me the following error: "name 'VAR' is not defined".
I am using statsmodels version 0.8.0. I even tried using sm.VAR instead of VAR, but then Python wouldn't print the statistics of the VAR model. Does anyone know why this is happening, how I can solve it, or how I can otherwise implement a VAR model in Python? Thanks!

Sorry, I figured out my mistake. I was not putting print before results.summary(), and I should have kept the line from statsmodels.tsa.api import VAR. Thanks though!
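For anyone landing here later, a minimal sketch of the working script (assuming, as in the question, that tsBitcoin and tsSP500 are pandas Series sharing a date index):
import pandas as pd
from statsmodels.tsa.api import VAR

# Combine the two series into one DataFrame of endogenous variables
dataForVar = pd.concat([tsBitcoin.to_frame(), tsSP500.to_frame()], axis=1)

model = VAR(dataForVar)
results = model.fit(2)       # fit a VAR with 2 lags
print(results.summary())     # in a script, print() is needed to see the output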

Related

Cannot iterate over AbstractOrderedScalarSet before it has been constructed (initialized)

I have just started with Pyomo and Python, and I am trying to create a simple model, but I have a problem adding a constraint.
I followed this example from GitHub:
https://github.com/brentertainer/pyomo-tutorials/blob/master/introduction/02-lp-pyomo.ipynb
import pandas as pd
import pyomo.environ as pe
import pyomo.opt as po

# DATA
T = 3
CH = 2
time = ['t{0}'.format(t+1) for t in range(T)]
CHP = ['CHP{0}'.format(s+1) for s in range(CH)]

# Technical characteristics
heat_maxprod = {'CHP1': 250, 'CHP2': 250}  # Only for CHPs

# MODEL
seq = pe.ConcreteModel

### SETS
seq.CHP = pe.Set(initialize=CHP)
seq.T = pe.Set(initialize=time)

### PARAMETERS
seq.heat_maxprod = pe.Param(seq.CHP, initialize=heat_maxprod)  # Max heat production

### VARIABLES
seq.q_DA = pe.Var(seq.CHP, seq.T, domain=pe.Reals)

### CONSTRAINTS
## Maximum and minimum heat production
seq.Heat_DA1 = pe.ConstraintList()
for t in seq.T:
    for s in seq.CHP:
        seq.Heat_DA1.add(0 <= seq.q_DA[s, t])

seq.Heat_DA2 = pe.ConstraintList()
for t in seq.T:
    for s in seq.CHP:
        seq.Heat_DA2.add(seq.q_DA[s, t] <= seq.heat_maxprod[s])

### OBJECTIVE
seq.obj = Objective(expr=sum(seq.C_fuel[s]*(seq.rho_heat[s]*seq.q_DA[s, t]) for t in seq.T for s in seq.CHP))
When I run the program, I get the following error:
RuntimeError: Cannot iterate over AbstractOrderedScalarSet 'AbstractOrderedScalarSet' before it has been constructed (initialized): 'iter' is an attribute on an Abstract component and cannot be accessed until the component has been fully constructed (converted to a Concrete component) using AbstractModel.create_instance() or AbstractOrderedScalarSet.construct().
Can someone please help with this issue? Thanks!
P.S. I know that the resulting answer to the problem is zero; I just want to get the syntax right.
In this line of code:
seq = pe.ConcreteModel
you are missing parentheses, so you are just binding a name to the ConcreteModel class instead of instantiating it.
Try:
seq = pe.ConcreteModel()
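Beyond the parentheses, the snippet also needs the pe. prefix on Objective, and C_fuel / rho_heat are never defined in the question. A minimal runnable sketch follows; the cost values are placeholders, not the asker's data:
import pyomo.environ as pe

seq = pe.ConcreteModel()  # parentheses: instantiate the model rather than alias the class

seq.CHP = pe.Set(initialize=['CHP1', 'CHP2'])
seq.T = pe.Set(initialize=['t1', 't2', 't3'])
seq.heat_maxprod = pe.Param(seq.CHP, initialize={'CHP1': 250, 'CHP2': 250})

# NonNegativeReals folds the "0 <= q_DA" constraint into the variable domain
seq.q_DA = pe.Var(seq.CHP, seq.T, domain=pe.NonNegativeReals)

seq.Heat_DA = pe.ConstraintList()
for t in seq.T:
    for s in seq.CHP:
        seq.Heat_DA.add(seq.q_DA[s, t] <= seq.heat_maxprod[s])

# Placeholder cost data; the question does not show C_fuel or rho_heat
C_fuel = {'CHP1': 1.0, 'CHP2': 1.0}
rho_heat = {'CHP1': 1.0, 'CHP2': 1.0}
seq.obj = pe.Objective(expr=sum(C_fuel[s] * rho_heat[s] * seq.q_DA[s, t]
                                for t in seq.T for s in seq.CHP))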

Can't connect to SharePoint Online using Python

I'm trying to display all of a SharePoint site's list names, but I'm getting this error:
No handlers could be found for logger "office365.runtime.auth.saml_token_provider.SamlTokenProvider._process_service_token_response"
This is my code:
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.sharepoint.client_context import ClientContext

url = 'https://abc.sharepoint.com/sites/siteName/'
ctx_auth = AuthenticationContext(url)
if ctx_auth.acquire_token_for_user(username='username@abc.com',
                                   password='password'):
    ctx = ClientContext(url, ctx_auth)
    lists = ctx.web.lists
    ctx.load(lists)
    ctx.execute_query()
    for l in lists:
        print(l.properties["Title"])
Thanks
I tested the code below with Python 2.7 and it works well.
from office365.runtime.auth.authentication_context import AuthenticationContext
from office365.sharepoint.client_context import ClientContext

tenant_url = "https://company.sharepoint.com"
site_url = "https://company.sharepoint.com/sites/sname"
ctx_auth = AuthenticationContext(tenant_url)
if ctx_auth.acquire_token_for_user("abc@company.onmicrosoft.com", "mypassword"):
    ctx = ClientContext(site_url, ctx_auth)
    lists = ctx.web.lists
    ctx.load(lists)
    ctx.execute_query()
    for l in lists:
        print(l.properties["Title"])
else:
    print(ctx_auth.get_last_error())
If this is related to ADFS, please refer to this closed question:
https://github.com/vgrem/Office365-REST-Python-Client/issues/85
BR
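As an aside, the "No handlers could be found for logger ..." message is not the real failure: it is Python 2's logging module reporting that no handler is configured for the library's logger. Configuring a basic handler makes the underlying authentication error visible. A minimal sketch:
import logging
logging.basicConfig(level=logging.DEBUG)  # route library log records to stderr

# ...then re-run the AuthenticationContext / acquire_token_for_user() code
# from above; the actual SAML/ADFS error should now appear instead of the notice.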
Well, I found a solution that gets the data for a specific SharePoint list using SharePlum:
from shareplum import Site
from shareplum import Office365
import json
import pandas

authcookie = Office365('https://abc.sharepoint.com/', username='username', password='password').GetCookies()
site = Site('https://abc.sharepoint.com/sites/SitesName/', authcookie=authcookie)
sp_list = site.List('ListName')
# print(sp_list)
data = sp_list.GetListItems(fields=['FieldName1', 'FieldName2'])
pandas.read_json(json.dumps(data)).to_csv("output.csv")

Scraping Jira and entering it into a Google Sheet (filling cells)

I need help figuring out how to finish my script.
So far I can get the info I need from Jira, and I now know the basics of entering data into Google Sheets, but my problem is when I try to combine the two.
If I just run the Jira scraper I get 50 different values, but when I add the Google Sheets code it only enters the last of the 50 values.
Does anyone know how I can fix this?
I am using Python and working in PyCharm.
Thanks!
# coding=utf-8
from jira.client import JIRA
import gspread
from oauth2client.service_account import ServiceAccountCredentials

SCOPES = ["https://spreadsheets.google.com/feeds"]
credentials = ServiceAccountCredentials.from_json_keyfile_name("blank", SCOPES)
connection = gspread.authorize(credentials)

options = {'server': 'https://jira.blank.com/'}
jira = JIRA(options, basic_auth=('blank', 'blank'))
projects = jira.projects()

for i in jira.search_issues('filter=11152'):
    print i

worksheet = connection.open("blank").sheet1
cell_list = worksheet.range('A2:A51')
for cell in cell_list:
    cell.value = i

# Update in batch
worksheet.update_cells(cell_list)
print("Done updating, check the spreadsheet now")

Code is not able to find my function in a Python (Spark) class

I need some help with an error in my code. The code retrieves Zomato reviews, stores them in HDFS, reads them back, and performs recommender analytics on them. The problem is that one of my functions is not recognized in the PySpark code. I am not pasting the entire code, as it might be confusing, so I have written a small, similar use case for easier understanding.
I am trying to read a file from local storage, convert the RDD to a DataFrame, perform some operations, convert it back to an RDD, apply a map operation that joins the fields with a '|' delimiter, and then save the result to HDFS.
When I try to call self.filter_data(y) in the lambda of the check function, it is not recognized, and I get this error:
Exception: It appears that you are attempting to reference
SparkContext from a broadcast variable, action, or transformation.
SparkContext can only be used on the driver, not in code that it run
on workers. For more information, see SPARK-5063.
Can anyone help me figure out why my filter_data function is not recognized? Do I need to add anything, or is there something wrong with the way I am calling it? Please help me. Thanks in advance!
INPUT VALUE
starting
0|0|ffae4f|0|https://b.zmtcdn.com/data/user_profile_pictures/565/aed32fa2eb18bb4a5a3ba426870fd565.jpg?fit=around%7C100%3A100&crop=100%3A100%3B%2A%2C%2A|https://www.zomato.com/akellaram87?utm_source=api_basic_user&utm_medium=api&utm_campaign=v2.1|2.5|FFBA00|Well...|unknown|16946626|2017-08-01T00-25-43.455182Z|30059877|Have been here for a quick bite for lunch, ambience and everything looked good, food was okay but presentation was not very appealing. We or...|2017-04-15 16:38:38|Big Foodie|6|Venkata Ram Akella|akellaram87|Bad Food|0.969352505662|0|0|0|0|0|0|1|1|0|0|1|0|0|0.782388212399
ending
starting
1|0|ffae4f|0|https://b.zmtcdn.com/data/user_profile_pictures/4d1/d70d7a57e1bfdf296ff4db3d8daf94d1.jpg?fit=around%7C100%3A100&crop=100%3A100%3B%2A%2C%2A|https://www.zomato.com/users/sm4-2011696?utm_source=api_basic_user&utm_medium=api&utm_campaign=v2.1|1|CB202D|Avoid!|unknown|16946626|2017-08-01T00-25-43.455182Z|29123338|Giving a 1.0 rating because one cannot proceed with writing a review, without rating it. This restaurant deserves a 0 star rating. The qual...|2017-01-04 10:54:53|Big Foodie|4|Sm4|unknown|Bad Service|0.964402034541|0|1|0|0|0|0|0|1|0|0|0|1|0|0.814540622345
ending
My code:
class new:
    # The class must be defined before it is instantiated in the main block below
    def __init__(self):
        print 'entered into init'

    def check(self):
        data = sc.textFile('file:///bdaas/src/spark_dependencies/classifier_data/final_Output.txt') \
            .map(lambda x: x.split('|')) \
            .map(lambda z: Row(restaurant_id=z[0], rating=z[1], review_id=z[2], review_text=z[3],
                               rating_color=z[4], rating_time_friendly=z[5], rating_text=z[6],
                               time_stamp=z[7], likes=z[8], comment_count=z[9], user_name=z[10],
                               user_zomatohandle=z[11], user_foodie_level=z[12], user_level_num=z[13],
                               foodie_color=z[14], profile_url=z[15], profile_image=z[16],
                               retrieved_time=z[17]))
        data_r = sqlContext.createDataFrame(data)
        data_r.show()
        d = data_r.rdd.collect()
        print d
        data_r.rdd.map(lambda x: list(x)).map(lambda y: self.filter_data(y)).collect()
        print data_r

    def filter_data(self, y):
        s = str()
        for i in y:
            print i.encode('utf-8')
            if i != '':
                s = s + i.encode('utf-8') + '|'
        print s[0:-1]
        return s[0:-1]

if __name__ == '__main__':
    import os, logging, sys, time, pandas, json
    from subprocess import PIPE, Popen, call
    from datetime import datetime, time, timedelta
    from pyspark import SparkContext, SparkConf

    conf = SparkConf().setAppName('test')
    sc = SparkContext(conf=conf, pyFiles=['/bdaas/exe/nlu_project/spark_classifier.py',
                                          '/bdaas/exe/spark_zomato/other_files/spark_zipcode.py',
                                          '/bdaas/exe/spark_zomato/other_files/spark_zomato.py',
                                          '/bdaas/exe/spark_zomato/conf_files/spark_conf.py',
                                          '/bdaas/exe/spark_zomato/conf_files/date_comparision.py'])
    from pyspark.sql import Row, SQLContext, HiveContext
    from pyspark.sql.functions import lit
    sqlContext = HiveContext(sc)
    import spark_conf

    n = new()
    n.check()
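For reference, the usual workaround for this SPARK-5063 error: the lambda y: self.filter_data(y) captures self, and pickling self drags driver-only state (sc, sqlContext) to the workers. Moving filter_data out of the instance, for example to module level, keeps the closure clean. A sketch under that assumption:
def filter_data(y):
    # Join the non-empty, utf-8-encoded fields of one row with '|'
    # (equivalent to the original append-then-strip-trailing-'|' logic)
    return '|'.join(i.encode('utf-8') for i in y if i != '')

class new:
    def check(self, data_r):
        # The closure now ships only the plain module-level function,
        # not self (and with it the SparkContext / HiveContext)
        return data_r.rdd.map(list).map(filter_data).collect()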

How to retrieve data from geoalchemy2 Query result?

Code snippet:
from dbinit import session
from geoalchemy2 import Geometry, func
result = session.query(func.ST_AsText('POINT(100 100)'))
How do I retrieve the data from this result object?
I have figured out the solution.
re = result.all()
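To get at the value itself: .all() returns a list of row tuples, and for a one-row, one-column query like this, SQLAlchemy's .scalar() is a handy shortcut. A small sketch, reusing the session and query from above:
re = result.all()
print(re[0][0])  # 'POINT(100 100)'

# Or fetch the bare value directly:
wkt = session.query(func.ST_AsText('POINT(100 100)')).scalar()
print(wkt)       # 'POINT(100 100)'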