Reading Python in Processing

2013-05-12

Does anyone know how to read a Python file's output in Processing, without using processing.py or another third-party library or platform? I have a Python script that generates text, and I want my Processing sketch to read it in real time. But because my three lines of text are not generated at the same moment, something seems to go wrong with what loadStrings reads: the third line always shows up a little later than the first two, so at some point the sketch garbles the display. How can I deal with this?

String[] lines;
PFont font;

void setup() {
  size(800, 600);
  font = createFont("Arial", 16);
  frameRate(2);
  //lines = loadStrings("output.txt");
}

void draw() {
  background(255);
  textFont(font);
  fill(0);
  // re-read the file on every frame (2 fps)
  lines = loadStrings("output.txt");
  for (int i = 0; i < 3; i++) {
    String word = lines[i];
    text(word, random(width), random(height));
  }
  // noLoop();
}
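A minimal defensive variant of draw() (my own sketch, assuming the finished output.txt always holds at least three lines): loadStrings returns null while the file is missing and a shorter array while it is only half-written, so those frames can simply be skipped. This only hides the symptom; the rename trick in the comments below removes the race itself.

void draw() {
  background(255);
  textFont(font);
  fill(0);
  lines = loadStrings("output.txt");
  // skip this frame while output.txt is missing or only partially written
  if (lines == null || lines.length < 3) {
    return;
  }
  for (int i = 0; i < 3; i++) {
    text(lines[i], random(width), random(height));
  }
}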

My Python sketch:

class MarkovGenerator(object):

    def __init__(self, n, max):
        self.n = n  # order (length) of ngrams
        self.max = max  # maximum number of elements to generate
        self.ngrams = dict()  # ngrams as keys; next elements as values
        beginning = tuple(["China", "is"])  # beginning ngram of every line
        beginning2 = tuple(["But", "it"])
        self.beginnings = list()
        self.beginnings.append(beginning)
        self.beginnings.append(beginning2)

    def tokenize(self, text):
        return text.split(" ")

    def feed(self, text):
        tokens = self.tokenize(text)

        # discard this line if it's too short
        if len(tokens) < self.n:
            return

        for i in range(len(tokens) - self.n):
            gram = tuple(tokens[i:i + self.n])
            next = tokens[i + self.n]  # get the element after the gram

            # if we've already seen this ngram, append; otherwise, set the
            # value for this key as a new list
            if gram in self.ngrams:
                self.ngrams[gram].append(next)
            else:
                self.ngrams[gram] = [next]

    # called from generate() to join together generated elements,
    # keeping only the text before the first period
    def concatenate(self, source):
        joined = " ".join(source)
        return joined.split(".")[0]

    # generate a text from the information in self.ngrams
    def generate(self, i):
        from random import choice

        # get the i-th line beginning; convert to a list
        current = self.beginnings[i]
        output = list(current)

        for _ in range(self.max):
            if current in self.ngrams:
                possible_next = self.ngrams[current]
                next = choice(possible_next)
                output.append(next)
                # get the last N entries of the output; we'll use this to
                # look up an ngram in the next iteration of the loop
                current = tuple(output[-self.n:])
            else:
                break

        return self.concatenate(output)

    def search_facebook_posts(self):
        import json
        import urllib
        FB = list()
        query = {'q': "feel", 'limit': 200}
        resp = urllib.urlopen('http://graph.facebook.com/search?' + urllib.urlencode(query))
        data = json.loads(resp.read())
        posts = list()
        for item in data['data']:
            if 'message' in item:
                posts.append(item)

        for post in posts:
            FB.append(post['message'].encode('ascii', 'replace'))

        return FB

    def together(self):
        import random
        import re
        sentences = list()
        manysentences = list()
        togetherlist = self.search_facebook_posts()
        for line in togetherlist:
            # split each post into clauses; "..." must be replaced
            # before "." or it will never match
            line = line.replace("...", "\n")
            for mark in (".", ",", "?", ";", "!", ":"):
                line = line.replace(mark, "\n")
            for piece in line.split("\n"):
                sentences.append(piece)

        for sentence in sentences:
            if "feel" in sentence:
                for matching in re.findall(r'\b[Ff]eel(.*)$', sentence):
                    manysentences.append(matching)

        sentencesnew = random.choice(manysentences)
        return "I feel" + sentencesnew

    def namelist(self):
        import random
        namelisty = list()
        for line in open("namelist"):
            namelisty.append(line + "said")

        return random.choice(namelisty)


if __name__ == '__main__':

    generator = MarkovGenerator(n=2, max=16)
    for line in open("china"):
        generator.feed(line.strip())

    print generator.together() + "."

Does no one know how to deal with this? – Douuga 2013-05-12 23:24:11

Does your Processing sketch work if you write a valid "output.txt" by hand? To avoid leaving behind a partially written "output.txt", have your Python script write to a temporary file and rename it at the end. – jfs 2013-05-13 03:02:49

Answer


You can use Java's Runtime and Process classes:

import java.io.*;

void setup() {
  try {
    Process p = Runtime.getRuntime().exec("python /path/to/your/script.py arguments");

    BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));

    String line = null;

    while ((line = input.readLine()) != null) {
      System.out.println(line);
    }

    int exitWith = p.waitFor();
    System.out.println("Exited with error code " + exitWith);
  }
  catch (Exception e) {
    e.printStackTrace();
  }
}

So, for example, with this minimal ten.py:

import sys

# argv[0] is the script name, so a real argument starts at argv[1]
if len(sys.argv) > 1:
    print sys.argv[1] * 10

I would see the message printed 10 times:

import java.io.*;

void setup() {
  try {
    Process p = Runtime.getRuntime().exec("python /Users/hm/Documents/Processing/tests/CMD/ten.py hello");

    BufferedReader input = new BufferedReader(new InputStreamReader(p.getInputStream()));

    String line = null;

    while ((line = input.readLine()) != null) {
      System.out.println(line);
    }

    int exitWith = p.waitFor();
    System.out.println("Exited with error code " + exitWith);
  }
  catch (Exception e) {
    e.printStackTrace();
  }
}

You should also [handle stderr in case there are any errors in the subprocess](http://stackoverflow.com/q/14165517/4279) – jfs 2013-05-13 03:08:32
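For example, a minimal sketch of that suggestion: drain p.getErrorStream() as well as stdout, so the Python traceback behind an exit code of 1 actually reaches the console. (Reading the two streams one after the other is fine for small outputs; larger ones should be read concurrently, or merged with ProcessBuilder's redirectErrorStream(true), so a full pipe cannot block the child.)

import java.io.*;

void setup() {
  try {
    Process p = Runtime.getRuntime().exec("python /path/to/your/script.py");

    BufferedReader output = new BufferedReader(new InputStreamReader(p.getInputStream()));
    BufferedReader errors = new BufferedReader(new InputStreamReader(p.getErrorStream()));

    String line = null;
    while ((line = output.readLine()) != null) {
      System.out.println(line);
    }
    // anything Python wrote to stderr (e.g. a traceback) lands here
    while ((line = errors.readLine()) != null) {
      System.out.println("stderr: " + line);
    }

    System.out.println("Exited with error code " + p.waitFor());
  }
  catch (Exception e) {
    e.printStackTrace();
  }
}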

Thanks. My code fetches data from twitter and prints out the messages I grab. It shows "Exited with error code 1" – Douuga 2013-05-17 02:45:11