#!/usr/bin/env python3
# -*- coding: UTF-8 -*-

import re
import time
import urllib.request

import requests
def recup_tweets_wikinews():
    # RSS feed of the French Wikinews Twitter account
    file = urllib.request.urlopen('http://twitrss.me/twitter_user_to_rss/?user=WikinewsESC2017')

    # collect each tweet's permalink from the <guid> elements of the feed
    listeurl = []
    for i in file:
        i = i.strip().decode('utf-8')
        if i.startswith('<guid>'):
            listeurl.append(i.split('<guid>')[1].split('</guid>')[0])

    # the numeric status id is the last path component of the permalink
    code = []
    for i in listeurl:
        code.append(i.split('/status/')[1])

    # Collect the fields needed to fill in the {{Twitter}} template by
    # scraping each tweet's permalink page
    nom_auteur = []
    nom_de_suivi = []
    date = []
    textep = []
    for i in code:
        print(i)
        tweet = urllib.request.urlopen('https://twitter.com/WikinewsESC2017/status/' + i)
        for a in tweet:
            a = a.strip().decode('utf-8')

            # display name of the author
            if a.startswith('<strong class="fullname show-popup-with-id " data-aria-label-part>'):
                a = a.split('<strong class="fullname show-popup-with-id " data-aria-label-part>')[1].split('</strong>')[0]
                print(a)
                nom_auteur.append(a)

            # @handle of the author
            if a.startswith('<span>&rlm;</span><span class="username js-action-profile-name" data-aria-label-part><s>@</s>'):
                a = a.split('<span>&rlm;</span><span class="username js-action-profile-name" data-aria-label-part><s>@</s><b>')[1].split('</b></span>')[0]
                a = '@' + a
                print(a)
                nom_de_suivi.append(a)

            # human-readable timestamp, from the permalink anchor's title
            if a.startswith('<a href="/WikinewsESC2017/status/' + i + '" class="tweet-timestamp js-permalink js-nav js-tooltip" title="'):
                a = a.split('js-permalink js-nav js-tooltip" title="')[1].split('"  data-conversation-id="')[0]
                print(a)
                date.append(a)

            # tweet text, taken from the page <title>
            if a.startswith('<title>Wikinews ESC2017 on Twitter: &quot;'):
                a = a.split('<title>Wikinews ESC2017 on Twitter: &quot;')[1].split('&quot;</title>')[0]
                print(a)
                textep.append(a)
    
    # Wrap each URL in <nowiki> to get around the Meta spam blacklist
    # (an alternative would be to simply strip the https:// prefix).
    # Only the first URL of each tweet is processed.
    texte = []
    for i in textep:
        # normalise http:// to https:// first, then wrap; doing the two
        # substitutions in the other order would wrap the same link twice
        i = i.replace('http://', 'https://')
        i = i.replace('https://', '<nowiki>https://')
        try:
            g = i.split('https://')
            i = g[0]
            g = g[1].split(' ')
            j = g[1:]  # the words that follow the URL, if any

            x = i + g[0]
            jp = ' '.join(j)
            try:
                # if the link points to a Wikinews article, replace it
                # with an internal [[...]] link built from the page title
                urlT = 'https://' + x.split('<nowiki>')[1]
                fileT = urllib.request.urlopen(urlT)
                t = None
                for p in fileT:
                    p = p.decode('utf-8')
                    if p.startswith('<title>') and ' — Wikinews</title>' in p:
                        t = p.split('<title>')[1].split(' — Wikinews</title>')[0]
                        print(t)
                if t is None:
                    raise ValueError('pas de titre Wikinews')
                print(urlT)
                x = i + " '''{{nobr|[[" + t + "|→ Lire l'article]]}}''' " + jp
                x = x.replace('<nowiki>', '')
            except Exception:
                # not a Wikinews page: keep the external link and close
                # the <nowiki> tag opened above
                x = x + '</nowiki> ' + jp
            texte.append(x)

        except Exception:
            # no URL in this tweet
            texte.append(i)

    return code, nom_auteur, nom_de_suivi, listeurl, texte, date
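
# A more robust alternative to the line-by-line <guid> scan above is to
# parse the feed as XML. Minimal sketch using only the standard library
# (hypothetical helper, not called by the script; assumes well-formed
# RSS 2.0 with one <guid> per <item>):
def extraire_ids_rss(url):
    import xml.etree.ElementTree as ET
    racine = ET.fromstring(urllib.request.urlopen(url).read())
    permaliens = [guid.text for guid in racine.iter('guid')]
    # the status id is the last path component of each permalink
    return [u.rsplit('/status/', 1)[1] for u in permaliens]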

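# The four startswith() scans above are tied to the exact 2017 Twitter
# markup. The tweet text can also be pulled from the <title> tag with a
# regex; a sketch under that assumption (hypothetical helper, not called
# by the script):
def texte_du_tweet(page_html):
    import html
    m = re.search(r'<title>.*? on Twitter: &quot;(.*?)&quot;</title>',
                  page_html, re.S)
    return html.unescape(m.group(1)) if m else None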

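# The blacklist workaround itself reduces to one substitution: wrap every
# link in <nowiki>...</nowiki>, including its closing tag. A minimal
# regex sketch (hypothetical helper, not called by the script):
def proteger_urls(texte_tweet):
    return re.sub(r'(https?://\S+)', r'<nowiki>\1</nowiki>', texte_tweet)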

# Fill in the {{Twitter}} template for each tweet
def recup_modele(code, nom_auteur, nom_de_suivi, listeurl, texte, date):

    contenu = '<small>Dernière actualisation le ~~~~~</small>'
    for a, i in enumerate(code):
        # hidden marker used later to detect already-published tweets
        bloc1 = '<!-- <code tweet>' + i + '</code tweet> -->'
        bloc2 = ('{{Twitter|nom auteur=' + nom_auteur[a]
                 + '|nom de suivi=' + nom_de_suivi[a]
                 + '|texte =' + texte[a]
                 # the profile URL takes the handle without its leading @
                 + '|lien profil=https://twitter.com/' + nom_de_suivi[a].lstrip('@')
                 + '|lien id=' + listeurl[a]
                 + '|date=' + date[a] + '}}')
        contenu = contenu + "\n" + bloc1 + bloc2

    return contenu
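
# For illustration, one line of the returned contenu looks like this
# (illustrative values, fields shortened):
# <!-- <code tweet>862787181307281409</code tweet> -->{{Twitter|nom auteur=Wikinews ESC2017|nom de suivi=@WikinewsESC2017|texte =...|lien profil=https://twitter.com/WikinewsESC2017|lien id=https://twitter.com/WikinewsESC2017/status/862787181307281409|date=...}}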


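# The cookie juggling in the loop below can be avoided with a
# requests.Session, which carries cookies across calls automatically.
# Equivalent sketch of the same MediaWiki API flow (hypothetical helper,
# not called by the script):
def editer_page(baseurl, username, password, title, text, summary):
    s = requests.Session()
    api = baseurl + 'api.php'
    # 1. fetch a login token
    r = s.get(api, params={'action': 'query', 'meta': 'tokens',
                           'type': 'login', 'format': 'json'})
    lgtoken = r.json()['query']['tokens']['logintoken']
    # 2. log in with the bot password
    s.post(api, data={'action': 'login', 'lgname': username,
                      'lgpassword': password, 'lgtoken': lgtoken,
                      'format': 'json'})
    # 3. fetch a CSRF (edit) token
    r = s.get(api, params={'action': 'query', 'meta': 'tokens',
                           'format': 'json'})
    csrf = r.json()['query']['tokens']['csrftoken']
    # 4. save the page
    return s.post(api, data={'action': 'edit', 'title': title,
                             'text': text, 'summary': summary,
                             'bot': '1', 'assert': 'user',
                             'token': csrf, 'format': 'json'})
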
# Refresh the page once an hour
while True:
    try:
        code, nom_auteur, nom_de_suivi, listeurl, texte, date = recup_tweets_wikinews()
        contenu = recup_modele(code, nom_auteur, nom_de_suivi, listeurl, texte, date)

        # bot credentials; these would be better kept out of the source,
        # e.g. in environment variables
        username = 'Matthobot'
        password = 'Matthobot@onemb59hk1offac6qpdboa65vueo6jba'
        baseurl = 'https://fr.wikinews.org/w/'
        summary = "[Action Bot] Mise à jour Le Live Wikinews"
        message = contenu
        title = 'Discussion_utilisateur:Mattho69/Le_Live_Wikinews'

        # step 1: request a login token
        payload = {'action': 'query', 'format': 'json', 'utf8': '', 'meta': 'tokens', 'type': 'login'}
        r1 = requests.post(baseurl + 'api.php', data=payload)

        # step 2: log in with the token
        login_token = r1.json()['query']['tokens']['logintoken']
        payload = {'action': 'login', 'format': 'json', 'utf8': '', 'lgname': username, 'lgpassword': password, 'lgtoken': login_token}
        r2 = requests.post(baseurl + 'api.php', data=payload, cookies=r1.cookies)

        # step 3: get a CSRF (edit) token
        params3 = '?format=json&action=query&meta=tokens&continue='
        r3 = requests.get(baseurl + 'api.php' + params3, cookies=r2.cookies)
        edit_token = r3.json()['query']['tokens']['csrftoken']

        # carry the session cookies from both responses into the edit
        edit_cookie = r2.cookies.copy()
        edit_cookie.update(r3.cookies)

        # step 4: save the page
        payload = {'action': 'edit', 'assert': 'user', 'format': 'json', 'bot': '1', 'utf8': '', 'text': message, 'summary': summary, 'title': title, 'token': edit_token}
        r4 = requests.post(baseurl + 'api.php', data=payload, cookies=edit_cookie)
        
        print('Le Live Wikinews')
        time.sleep(3600)

    except Exception as e:
        # a bare except here would also swallow KeyboardInterrupt and
        # make the bot impossible to stop with Ctrl-C
        print('Non mis à jour :', e)
        time.sleep(3600)
    
    

# Collect the tweets already published on the page, via their hidden
# markers. Note: as written this block is unreachable, because the
# update loop above never terminates.

file = urllib.request.urlopen('https://fr.wikinews.org/w/index.php?title=Discussion_utilisateur:Mattho69/Le_Live_Wikinews&action=raw')

codeexit = []
listetwexit = []
for i in file:
    i = i.decode('utf-8')
    # the marker is preceded by '<!-- ', so startswith() would never
    # match; test for the substring instead
    if '<code tweet>' in i:
        listetwexit.append(i)
        a = i.split('<code tweet>')[1].split('</code tweet>')[0]
        codeexit.append(a)
        print(a)
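
# The scan above can also be done in a single pass with a regular
# expression (sketch; assumes the '<code tweet>ID</code tweet>' markers
# written by recup_modele):
def ids_deja_publies(url):
    page = urllib.request.urlopen(url).read().decode('utf-8')
    return re.findall(r'<code tweet>(\d+)</code tweet>', page)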