
Gradient of a kriged function in OpenMDAO

I am currently writing a multiple-gradient descent algorithm in which I use a kriged function. My problem is that I cannot find out how to obtain the gradient of the kriged function (I tried to use linearize, but I don't know how to make it work).

from __future__ import print_function 

from six import moves 
from random import shuffle 
import sys 
import numpy as np 
from numpy import linalg as LA 
import math 
from openmdao.braninkm import F, G, DF, DG 

from openmdao.api import Group, Component, IndepVarComp 
from openmdao.api import MetaModel 
from openmdao.api import KrigingSurrogate, FloatKrigingSurrogate 

def rand_lhc(b, k): 
    # Generate a random Latin hypercube sample of 2*b points in k dimensions, 
    # built on the integer grid [-b, b-1]^k and scaled to roughly [-1.2, 1.2]^k. 
    arr = np.zeros((2*b, k)) 
    row = list(moves.xrange(-b, b)) 
    for i in moves.xrange(k): 
        shuffle(row) 
        arr[:, i] = row 
    return arr/b*1.2 


class TrigMM(Group): 
    ''' FloatKrigingSurrogate gives responses as floats ''' 

    def __init__(self): 
        super(TrigMM, self).__init__() 

        # Create a meta model with f_x as the response 
        F_mm = self.add("F_mm", MetaModel()) 
        F_mm.add_param('X', val=np.array([0., 0.])) 
        F_mm.add_output('f_x:float', val=0., surrogate=FloatKrigingSurrogate()) 
        # F_mm.add_output('df_x:float', val=0., surrogate=KrigingSurrogate().linearize) 

        #F_mm.linearize('X', 'f_x:float') 
        #F_mm.add_output('g_x:float', val=0., surrogate=FloatKrigingSurrogate()) 
        print('init ok') 
        self.add('p1', IndepVarComp('X', val=np.array([0., 0.]))) 
        self.connect('p1.X', 'F_mm.X') 

        # Create a meta model with g_x as the response 
        G_mm = self.add("G_mm", MetaModel()) 
        G_mm.add_param('X', val=np.array([0., 0.])) 
        G_mm.add_output('g_x:float', val=0., surrogate=FloatKrigingSurrogate()) 
        #G_mm.add_output('df_x:float', val=0., surrogate=KrigingSurrogate().linearize) 

        #G_mm.linearize('X', 'g_x:float') 
        self.add('p2', IndepVarComp('X', val=np.array([0., 0.]))) 
        self.connect('p2.X', 'G_mm.X') 

from openmdao.api import Problem 

prob = Problem() 
prob.root = TrigMM() 
prob.setup() 

u = 4 
v = 3 

# Training points from a Latin hypercube sample 

prob['F_mm.train:X'] = rand_lhc(20,2) 
prob['G_mm.train:X'] = rand_lhc(20,2) 

#prob['F_mm.train:X'] = rand_lhc(10,2) 
#prob['G_mm.train:X'] = rand_lhc(10,2) 
#prob['F_mm.linearize:X'] = rand_lhc(10,2) 
#prob['G_mm.linearize:X'] = rand_lhc(10,2) 
datF = [] 
datG = [] 
datDF = [] 
datDG = [] 

# Evaluate the true functions at the training points 
for i in range(len(prob['F_mm.train:X'])): 
    datF.append(F(np.array([prob['F_mm.train:X'][i]]), u)) 
    #datG.append(G(np.array([prob['F_mm.train:X'][i]]), v)) 
data_trainF = np.fromiter(datF, np.float) 

for i in range(len(prob['G_mm.train:X'])): 
    datG.append(G(np.array([prob['G_mm.train:X'][i]]), v)) 
data_trainG = np.fromiter(datG, np.float) 

# Feed the sampled responses to the surrogates as training outputs 
prob['F_mm.train:f_x:float'] = data_trainF 
#prob['F_mm.train:g_x:float'] = data_trainG 
prob['G_mm.train:g_x:float'] = data_trainG 

Answer


Are you planning to write a multiple-gradient descent driver? If so, OpenMDAO computes the gradient from the parameters to the outputs at the Problem level via the calc_gradient method.
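
For example, with the model from the question, something like the following should return the Jacobian of a surrogate response with respect to a design variable. This is a minimal sketch against the OpenMDAO 1.x API; the variable names come from your script, and the training assignments above must run before the gradient is requested:

prob.run() 

# Jacobian of the kriged response F_mm.f_x:float with respect to p1.X 
J = prob.calc_gradient(['p1.X'], ['F_mm.f_x:float'], return_format='array') 
print(J) 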

If you look at the source code of the pyoptsparse driver:

https://github.com/OpenMDAO/OpenMDAO/blob/master/openmdao/drivers/pyoptsparse_driver.py

The _gradfunc method is a callback function that returns the gradients of the constraints and objectives with respect to the design variables. The MetaModel component has built-in analytic gradients for (I believe) all of our surrogates, so you don't even have to declare them there.
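
So a hand-rolled multiple-gradient descent can simply call calc_gradient inside its own loop and let MetaModel supply the surrogate derivatives. A rough sketch under the same setup as above; the starting point, step size, and iteration count are made up for illustration, and a real multi-gradient method would combine the two Jacobians into a common descent direction rather than following f_x alone:

X = np.array([0.5, 0.5]) 
step = 0.1  # illustrative step size 
for it in range(20): 
    prob['p1.X'] = X 
    prob['p2.X'] = X 
    prob.run() 
    # Gradients of both kriged responses at the current point; 
    # G_mm is fed by p2, so its gradient is taken with respect to p2.X 
    JF = prob.calc_gradient(['p1.X'], ['F_mm.f_x:float'], return_format='array') 
    JG = prob.calc_gradient(['p2.X'], ['G_mm.g_x:float'], return_format='array') 
    # Plain descent step on f_x; replace with your multi-gradient update 
    X = X - step * JF[0] 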

If that is not what you are trying to do, then I may need more information about your application.