TypeError: __init__() takes 2 positional arguments but 3 were given

30/07/2019


I have a function that extracts data from the Instagram API, and it is reporting this error.

It is running on Python 3.7.

Error:
Traceback (most recent call last):
  File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 346, in run_http_function
    result = _function_handler.invoke_user_function(flask.request)
  File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 217, in invoke_user_function
    return call_user_function(request_or_event)
  File "/env/local/lib/python3.7/site-packages/google/cloud/functions/worker.py", line 210, in call_user_function
    return self._user_function(request_or_event)
  File "/user_code/main.py", line 180, in st_scrapper
    dtags = pd.read_gbq(query_tags, dialect='standard', index_col="Hashtag")
  File "/env/local/lib/python3.7/site-packages/pandas/io/gbq.py", line 176, in read_gbq
    **kwargs
  File "/env/local/lib/python3.7/site-packages/pandas_gbq/gbq.py", line 969, in read_gbq
    use_bqstorage_api=use_bqstorage_api,
  File "/env/local/lib/python3.7/site-packages/pandas_gbq/gbq.py", line 372, in __init__
    self.client = self.get_client()
  File "/env/local/lib/python3.7/site-packages/pandas_gbq/gbq.py", line 425, in get_client
    client_info=client_info,
  File "/env/local/lib/python3.7/site-packages/google/cloud/bigquery/client.py", line 169, in __init__
    self._connection = Connection(self, client_info=client_info)
  File "/env/local/lib/python3.7/site-packages/google/cloud/bigquery/_http.py", line 33, in __init__
    super(Connection, self).__init__(client, client_info)
TypeError: __init__() takes 2 positional arguments but 3 were given
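The last two frames are the telling ones: google-cloud-bigquery's Connection forwards two positional arguments (client and client_info) to its base class, while the installed base class apparently accepts only one besides self. A minimal sketch of that failure shape, with hypothetical class names rather than the real library code:

[code=python]
# Hypothetical sketch of the failure shape in the traceback: the subclass
# forwards two positional arguments, but the installed base class only
# declares one parameter besides self.
class BaseConnection:
    def __init__(self, client):
        self.client = client

class Connection(BaseConnection):
    def __init__(self, client, client_info=None):
        super(Connection, self).__init__(client, client_info)

Connection(object(), client_info=None)
# TypeError: __init__() takes 2 positional arguments but 3 were given
[/code]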


[code=python]
import pandas as pd
import numpy as np
from datetime import datetime, date, timedelta
import time
from google.cloud import storage
import io
import re
import requests
import gcsfs


def bq_date(x):
    # Zero-pad day and month to build a YYYYMMDD date string for BigQuery.
    if len(str(x.day)) == 1:
        day = "0" + str(x.day)
    else:
        day = str(x.day)

    if len(str(x.month)) == 1:
        month = "0" + str(x.month)
    else:
        month = str(x.month)

    return "{}{}{}".format(x.year, month, day)


def list_gcs_objs(bucket, prefix):
    # List object names under a GCS prefix, skipping the folder placeholder blob.
    storage_client = storage.Client()
    bucket_check = storage_client.get_bucket(bucket)
    blob_list = list(bucket_check.list_blobs(prefix=prefix))
    obj_paths = [blob.name for blob in blob_list[1:]]
    if len(blob_list) <= 1:
        print("Folder empty\n")
    return obj_paths


def upload_to_gcs(bucket, object_key, data):
    # storage.Client takes a project as its first argument, not a bucket name
    storage_client = storage.Client()
    bucket_up = storage_client.get_bucket(bucket)
    blob_up = bucket_up.blob(object_key)
    response = blob_up.upload_from_string(data)
    return response


def getInstagramStoriesFeed(base_url):
    fields = "?fields=id,caption,media_type,media_url,permalink,timestamp,username"
    return base_url + fields


def getStories_Insights(post_id, access_token):
    base = "https://graph.facebook.com/v3.2/"
    arequest = "{}/insights?access_token={}&metric=".format(post_id, access_token) + \
               "impressions,reach,replies,exits,taps_forward,taps_back"
    return base + arequest


def scrapeInstagramStories(page_id, access_token, since_date, until_date):
    global ndf, sf
    from datetime import date
    scrape_starttime = date.today()
    base_url = "https://graph.facebook.com/v3.2"
    node = "/{}/stories?fields=".format(page_id)
    fields = "id,caption,media_type,permalink,timestamp,username"
    parameters = "&limit=100&access_token={}".format(access_token)
    anchor = since_date
    after = ''
    ndf = pd.DataFrame()  # so the .empty check below works even if nothing is scraped

    print("Scraping {} Instagram Page: {}\n".format(page_id, scrape_starttime))
    ctr = 1
    count = 0
    while (anchor >= since_date) & (count < 10):
        after = '' if after == '' else "&after={}".format(after)
        url = base_url + node + fields + parameters + after

        content = requests.get(url).json()
        sf = pd.DataFrame.from_dict(content['data'])

        if len(sf) > 0:
            sf['timestamp'] = pd.to_datetime(sf.timestamp, infer_datetime_format=True)
            sf['data'] = sf.timestamp.apply(lambda x: x.date())
            anchor = sf.data.min()

            if ctr == 1:
                ndf = sf.copy()
            else:
                ndf = pd.concat([sf, ndf], sort=False)
            ctr += 1

            # if there is no next page, we're done.
            if 'paging' in content:
                after = content['paging']['cursors']['after']
            else:
                break

        count += 1

    if ndf.empty:
        return ndf

    # keep only the stories whose date falls inside [since_date, until_date]
    date_index = [d.date() for d in pd.date_range(start=since_date, end=until_date)]
    ndf['timestamp'] = pd.to_datetime(ndf.timestamp, infer_datetime_format=True)
    ndf['data'] = ndf.timestamp.apply(lambda x: x.date())

    ndf['data'] = np.where(ndf.data.isin(date_index), ndf.data, np.nan)
    ndf.dropna(subset=['data'], inplace=True)
    ndf['data'] = pd.to_datetime(ndf.data, infer_datetime_format=True)

    impressions = {}
    reach = {}
    replies = {}
    exits = {}
    tf = {}
    tb = {}

    # one insights request per story; fall back to zeros when the API
    # returns no metrics for a post
    for post in ndf.id.unique():
        aux_url = getStories_Insights(post, access_token)
        insights = requests.get(aux_url).json()
        if 'data' in insights and len(insights['data']) > 0:
            impressions.update({post: insights['data'][0]['values'][0]['value']})
            reach.update({post: insights['data'][1]['values'][0]['value']})
            replies.update({post: insights['data'][2]['values'][0]['value']})
            exits.update({post: insights['data'][3]['values'][0]['value']})
            tf.update({post: insights['data'][4]['values'][0]['value']})
            tb.update({post: insights['data'][5]['values'][0]['value']})
        else:
            impressions.update({post: 0})
            reach.update({post: 0})
            replies.update({post: 0})
            exits.update({post: 0})
            tf.update({post: 0})
            tb.update({post: 0})

    ndf['impressions'] = ndf.id.map(impressions)
    ndf['reach'] = ndf.id.map(reach)
    ndf['replies'] = ndf.id.map(replies)
    ndf['exits'] = ndf.id.map(exits)
    ndf['taps_forward'] = ndf.id.map(tf)
    ndf['taps_back'] = ndf.id.map(tb)

    ndf['id'] = ndf.id.astype('category')
    # ndf['caption'] = ndf.caption.astype('category')
    # ndf['media_type'] = ndf.media_type.astype('category')
    # ndf['permalink'] = ndf.permalink.astype('category')
    # ndf['username'] = ndf.username.astype('category')

    ndf['impressions'] = ndf.impressions.astype('int64')
    ndf['reach'] = ndf.reach.astype('int64')
    ndf['replies'] = ndf.replies.astype('int64')
    ndf['exits'] = ndf.exits.astype('int64')
    ndf['taps_forward'] = ndf.taps_forward.astype('int64')
    ndf['taps_back'] = ndf.taps_back.astype('int64')

    return ndf


def st_scrapper(request):
    global ndf
    since_date = (date.today() - timedelta(1))
    until_date = date.today()
    today = date.today().strftime("%Y%m%d")

    mybucket = "gdata-dn-gshow-sandbox"
    mainprefix = "AD/INS/"

    # List the Instagram data already available in GCS
    maindata = list_gcs_objs(mybucket, mainprefix)
    ins_dates = [x[-12:-4] for x in maindata]

    # Queries
    query_tags = "SELECT * FROM `globoid.AD_gshow_hashtags`"

    dtags = pd.read_gbq(query_tags, dialect='standard', index_col="Hashtag")
    tags = dtags['Produto'].to_dict()

    user_token = "X"

    gshow_pages =
[/code]
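Every frame below main.py in the traceback sits inside installed packages, so the problem is more likely a mismatch between the library versions deployed with the function than a bug in this code. In similar reports, the cause was an older google-cloud-core whose Connection did not yet accept client_info, so upgrading google-cloud-core together with google-cloud-bigquery may help; that is a guess, not a confirmed fix. A quick way to check what is actually installed:

[code=python]
# Print the installed versions of the packages involved in the traceback,
# to check whether they are mutually compatible (assumption: a version
# mismatch between these libraries is what triggers the TypeError).
import pkg_resources

for pkg in ("pandas-gbq", "google-cloud-bigquery", "google-cloud-core"):
    print(pkg, pkg_resources.get_distribution(pkg).version)
[/code]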

Posts

30/07/2019

Sadasd

It's a bit tricky to answer this one, because the code you posted doesn't include the __init__ in question, but this error happens when a method declares a certain number of parameters and you pass a different number of arguments when calling it. In your case, the __init__ takes 2 parameters, but 3 arguments were passed.
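A minimal illustration of what the reply describes (a hypothetical class, not the code from the question):

[code=python]
class Example:
    def __init__(self, value):  # 2 parameters counting self
        self.value = value

Example(1, 2)  # 3 arguments counting self
# TypeError: __init__() takes 2 positional arguments but 3 were given
[/code]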