AutoEM desktop app design

The chart font is too big! I already dropped it from 12 pt to 10 pt, and somehow it still doesn't look right!

The core-parameters content needs more thought!

The key is to surface comparisons! The user's goal is to design the best motor, and "best" is something you establish by comparing!

Consider moving the log panel, and the parameter-settings area needs rework! Consider tabs for the chart area, since several other plots need to be visible too; the cogging-torque chart, for one, is worth a look! This change feels like it will take a week or two to finish!

The debugging workload is a bit heavy!



n5321 | 2025-10-20 00:30

Generating charts with the plotly CDN build

Plotly is still JS underneath! Generating charts in the desktop app really is too slow! The result:

The value is interactive charts! It can achieve the same effect as Highcharts, but behind it all is a web front page, and with no browser sitting in front of it, debugging is even more annoying than debugging in Chrome.

The CDN version is a bit simpler to write, but the load takes several seconds of waiting. Too annoying!

Switching to local JS, it turns out, is a hassle; see the sketch below.
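For the record, a minimal sketch of the local-JS route (my own sketch, not the project code; the plotly.min.js location and the function name are assumptions). Part of the hassle: Qt WebEngine's setHtml() rejects content larger than about 2 MB, and plotly.min.js alone exceeds that, so the library cannot simply be inlined; the page has to go to disk and be loaded by URL:

# Sketch: reference a local plotly.min.js instead of the CDN.
# setHtml() caps content at ~2 MB, so the page is written to disk
# and loaded via a file:// URL rather than inlined.
import shutil
import tempfile
from pathlib import Path

from PyQt6.QtCore import QUrl

def load_local_figure(web_view, fig, plotly_js="plotly.min.js"):  # names are assumptions
    out_dir = Path(tempfile.gettempdir())
    # Put the library next to the generated page so the relative
    # <script src="plotly.min.js"> reference resolves.
    shutil.copy(plotly_js, out_dir / "plotly.min.js")
    html = fig.to_html(include_plotlyjs="plotly.min.js", full_html=True)
    page = out_dir / "autoem_chart.html"
    page.write_text(html, encoding="utf-8")
    web_view.load(QUrl.fromLocalFile(str(page)))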


import sys
import numpy as np
from PyQt6.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QWidget, QPushButton, QProgressBar
from PyQt6.QtWebEngineWidgets import QWebEngineView
from PyQt6.QtCore import pyqtSlot
import plotly.graph_objects as go

class PlotlyGuiDemo(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("PyQt6 + Plotly Dynamic Update")
        self.resize(1200, 700)

        # --- Main layout ---
        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout(central_widget)

        # --- Browser widget ---
        self.web_view = QWebEngineView()
        layout.addWidget(self.web_view)

        # --- Progress bar ---
        self.progress = QProgressBar()
        self.progress.setMaximum(100)
        layout.addWidget(self.progress)

        # --- Update button ---
        self.update_btn = QPushButton("Update data")
        layout.addWidget(self.update_btn)
        self.update_btn.clicked.connect(self.update_plot)

        # --- Initial data ---
        self.n_points = 50
        self.torque = np.linspace(0, 100, self.n_points)
        self.speed = np.sin(self.torque / 10) * 1000 + 2000
        self.current = np.cos(self.torque / 15) * 50 + 100
        self.power = self.speed * self.torque / 10
        self.efficiency = np.clip(np.random.rand(self.n_points) * 100, 80, 100)

        # --- Build the Plotly figure ---
        self.fig = go.Figure()

        # Y traces
        self.speed_trace = go.Scatter(x=self.torque, y=self.speed, name='Speed (rpm)', line=dict(color='#1f77b4'))
        self.efficiency_trace = go.Scatter(x=self.torque, y=self.efficiency, name='Efficiency (%)', line=dict(color='#ff7f0e'), yaxis='y2')
        self.current_trace = go.Scatter(x=self.torque, y=self.current, name='Current (A)', line=dict(color='#2ca02c'), yaxis='y3')
        self.power_trace = go.Scatter(x=self.torque, y=self.power, name='Power (W)', line=dict(color='#d62728'), yaxis='y4')

        for trace in [self.speed_trace, self.efficiency_trace, self.current_trace, self.power_trace]:
            self.fig.add_trace(trace)

        # Layout with multiple y axes
        self.fig.update_layout(
            xaxis=dict(title='Torque (N·m)'),
            yaxis=dict(title='Speed (rpm)', side='left'),
            yaxis2=dict(title='Efficiency (%)', overlaying='y', side='right', anchor='x'),
            yaxis3=dict(title='Current (A)', overlaying='y', side='right', anchor='free', position=0.92),
            yaxis4=dict(title='Power (W)', overlaying='y', side='right', anchor='free', position=0.99),
            legend=dict(orientation='h', yanchor="top", y=-0.2, xanchor="center", x=0.5),
            height=500
        )

        # --- Load the HTML into the browser widget ---
        self.web_view.loadProgress.connect(self.progress.setValue)
        self.web_view.loadFinished.connect(self.on_load_finished)
        self.load_figure()

    def load_figure(self):
        # First load of the HTML
        html = self.fig.to_html(include_plotlyjs='cdn', full_html=True)
        self.web_view.setHtml(html)

    @pyqtSlot(bool)
    def on_load_finished(self, ok):
        if ok:
            self.progress.setValue(100)
            print("Chart loaded!")
        else:
            print("Chart failed to load!")

    def update_plot(self):
        # Simulate new data
        self.speed = np.sin(self.torque / 10 + np.random.rand()) * 1000 + 2000
        self.efficiency = np.clip(np.random.rand(self.n_points) * 100, 80, 100)
        self.current = np.cos(self.torque / 15 + np.random.rand()) * 50 + 100
        self.power = self.speed * self.torque / 10

        # Update only the traces
        self.fig.data[0].y = self.speed
        self.fig.data[1].y = self.efficiency
        self.fig.data[2].y = self.current
        self.fig.data[3].y = self.power

        # Reload the browser widget. Bug fix: this originally used
        # include_plotlyjs=False, full_html=False, but setHtml() replaces the
        # whole page, so the fragment arrived without plotly.js and nothing
        # rendered. Reloading a full page pulls the CDN script again on every
        # update, which is exactly the slow reload complained about above.
        html = self.fig.to_html(include_plotlyjs='cdn', full_html=True)
        self.web_view.setHtml(html)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = PlotlyGuiDemo()
    window.show()
    sys.exit(app.exec())
Another CDN-based version seems to work better:

import sys
import os
import json
import numpy as np
from PyQt6.QtWidgets import QApplication, QMainWindow, QVBoxLayout, QWidget, QPushButton, QProgressBar
from PyQt6.QtWebEngineWidgets import QWebEngineView
from PyQt6.QtCore import pyqtSlot, QUrl
import plotly.graph_objects as go  # <--- the import that was missing
from plotly.utils import PlotlyJSONEncoder  # fig.to_dict() keeps numpy arrays, which plain json.dumps cannot serialize

# Enable the remote-debugging port (for troubleshooting).
# With this script running, open http://localhost:9223 in Chrome/Edge to get the developer tools.
os.environ['QTWEBENGINE_REMOTE_DEBUGGING'] = "9223"


class PlotlyGuiDemo(QMainWindow):
    def __init__(self):
        super().__init__()
        self.setWindowTitle("PyQt6 + Plotly Dynamic Update (fixed)")
        self.resize(1200, 700)

        central_widget = QWidget()
        self.setCentralWidget(central_widget)
        layout = QVBoxLayout(central_widget)

        self.web_view = QWebEngineView()
        layout.addWidget(self.web_view)

        self.progress = QProgressBar()
        self.progress.setMaximum(100)
        layout.addWidget(self.progress)

        self.update_btn = QPushButton("Update data")
        layout.addWidget(self.update_btn)
        self.update_btn.clicked.connect(self.update_plot)

        self.n_points = 50
        self.torque = np.linspace(0, 100, self.n_points)
        self.speed = np.sin(self.torque / 10) * 1000 + 2000
        self.current = np.cos(self.torque / 15) * 50 + 100
        self.power = self.speed * self.torque / 10
        self.efficiency = np.clip(np.random.rand(self.n_points) * 100, 80, 100)

        # Now Python knows what go is
        self.fig = go.Figure()

        self.fig.add_trace(go.Scatter(x=self.torque, y=self.speed, name='Speed (rpm)'))
        self.fig.add_trace(go.Scatter(x=self.torque, y=self.efficiency, name='Efficiency (%)', yaxis='y2'))
        self.fig.add_trace(go.Scatter(x=self.torque, y=self.current, name='Current (A)', yaxis='y3'))
        self.fig.add_trace(go.Scatter(x=self.torque, y=self.power, name='Power (W)', yaxis='y4'))

        self.fig.update_layout(
            xaxis=dict(title='Torque (N·m)'),
            yaxis=dict(title='Speed (rpm)', side='left', color='#1f77b4'),
            yaxis2=dict(title='Efficiency (%)', overlaying='y', side='right', anchor='x', color='#ff7f0e'),
            yaxis3=dict(title='Current (A)', overlaying='y', side='right', anchor='free', position=0.92,
                        color='#2ca02c'),
            yaxis4=dict(title='Power (W)', overlaying='y', side='right', anchor='free', position=0.99, color='#d62728'),
            legend=dict(orientation='h', yanchor="top", y=-0.2, xanchor="center", x=0.5),
            height=500
        )

        self.web_view.loadProgress.connect(self.progress.setValue)
        self.web_view.loadFinished.connect(self.on_load_finished)
        self.load_figure()

    def load_figure(self):
        plotly_cdn = "https://cdn.plot.ly/plotly-latest.min.js"

        # Serialize with PlotlyJSONEncoder so the numpy arrays become JSON lists
        fig_dict = self.fig.to_dict()
        fig_data_json = json.dumps(fig_dict['data'], cls=PlotlyJSONEncoder)
        fig_layout_json = json.dumps(fig_dict['layout'], cls=PlotlyJSONEncoder)

        html = f"""
        <html>
        <head>
            <meta charset="utf-8" />
            <script src="{plotly_cdn}"></script>
        </head>
        <body>
            <div id="plotly-div" style="width:100%; height:100%;"></div>
            <script>
                var fig_data = {fig_data_json};
                var fig_layout = {fig_layout_json};
                Plotly.newPlot('plotly-div', fig_data, fig_layout, {{responsive: true}});
            </script>
        </body>
        </html>
        """
        self.web_view.setHtml(html)

    @pyqtSlot(bool)
    def on_load_finished(self, ok):
        if ok:
            self.progress.setValue(100)
            print("Chart loaded!")
        else:
            print("Chart failed to load!")

    def update_plot(self):
        self.speed = np.sin(self.torque / 10 + np.random.rand()) * 1000 + 2000
        self.efficiency = np.clip(np.random.rand(self.n_points) * 100, 80, 100)
        self.current = np.cos(self.torque / 15 + np.random.rand()) * 50 + 100
        self.power = self.speed * self.torque / 10

        new_data_y = [
            self.speed.tolist(),
            self.efficiency.tolist(),
            self.current.tolist(),
            self.power.tolist()
        ]

        # Restyle the existing traces in place: no page reload, so no CDN wait
        js_code = f"Plotly.restyle('plotly-div', {{'y': {json.dumps(new_data_y)}}}, [0, 1, 2, 3]);"
        self.web_view.page().runJavaScript(js_code)


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = PlotlyGuiDemo()
    window.show()
    sys.exit(app.exec())


n5321 | 2025-10-18 08:34

Python version of AutoEM

Status:

Data acquisition is done!

There is a first, minimal GUI version that works!

Problems:
The result charts need tidying!

Log: the current log doesn't write its text into the document yet. That needs adding!

Parameters: use edit_line instead of a table! To be changed later!

Buttons: keep only one!

The document-encryption question: add it later? Keep the MVP framework?!




n5321 | 2025-10-14 00:00

AutoEM online tools_debug

Matlab has to be dragged back out for the scientific computing after all!

The code under django is tied to too much related data!

So the scientific computing under Python got a bit messy; or, to be blunt, I'm simply more fluent in Matlab. So I switched to Matlab to rebuild the Excel sheet's mathematical model.

Problem 1: not really a bug! The stall torque is derived from a first-order linear fit through 5 data points. Excel uses its built-in TREND; Matlab and Python offer plenty of alternatives, but either way you end up calling some function, and in the end there is still a tiny discrepancy: 1012.9 mNm vs 1013.2 mNm. Supposedly the cause is small differences in the underlying implementations and floating-point precision of Excel versus Matlab and Python.
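For reference, a minimal sketch of the kind of fit in question (the numbers are made up for illustration, not the real dyno data):

# Sketch: stall torque from a first-order (linear) fit of 5 dyno points,
# the same job Excel's TREND does. Values below are illustrative only.
import numpy as np

torque_mNm = np.array([100.0, 300.0, 500.0, 700.0, 900.0])
speed_rpm = np.array([4500.0, 3600.0, 2550.0, 1500.0, 480.0])

slope, intercept = np.polyfit(torque_mNm, speed_rpm, 1)  # linear trend
stall_torque = -intercept / slope  # extrapolate to the zero-speed intercept
print(f"stall torque ~ {stall_torque:.1f} mNm")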

Strange. Time for a step-by-step check!

Two tiny discrepancies!

  1. One current data point had been mistyped: 0.81 entered as 0.83.

  2. The g·cm-to-mNm conversion factor was originally GCM_TO_MNM = 0.0980665; it had been changed to the slightly coarser GCM_TO_MNM = 0.0981.

With those two fixed, the computed numbers finally agree!


n5321 | 2025-09-16 23:43

pycharm_debug_settings.

Setting up step-by-step debugging under PyCharm!

Step 1: Delete the current run configuration

  1. In the top-right corner of PyCharm, click the name of your configuration (here, mysite).

  2. From the dropdown menu, choose "Edit Configurations...".

  3. In the left pane of the window that opens, select the mysite configuration.

  4. Click the minus ( - ) button at the top to delete it.

  5. Click "Apply" or "OK" to save.

Step 2: Let PyCharm create a new configuration automatically

  1. Find and open the manage.py file in your project.

  2. In the file, locate the line if __name__ == '__main__':.

  3. Click the green triangle "Run" button that appears next to the line number.

  4. In the popup menu, choose "Run 'runserver'" or "Debug 'runserver'".

This makes PyCharm automatically create a new, usually correct "Django server" configuration; the manage.py entry point it hooks into is sketched below.
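For reference, a minimal version of the standard manage.py entry point that the Run button targets (ordinary Django scaffolding; the project name mysite is taken from the steps above):

# manage.py (standard Django scaffold, mysite assumed as the project name)
import os
import sys

if __name__ == '__main__':
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'mysite.settings')
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)  # e.g. ['manage.py', 'runserver']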

Step 3: Verify the new configuration and run

  1. PyCharm will use the new configuration to run or debug your project.

  2. If needed, open "Edit Configurations..." again to inspect the auto-generated configuration. It should look very similar to your earlier setup, but this time it should pass the arguments to manage.py correctly.

  3. Make sure the "Python interpreter" and "Working directory" are set correctly. Judging from your screenshot, the earlier settings were correct, and the new configuration should inherit them automatically.


n5321 | 2025-09-14 21:41

AutoEM online tools

autoem is an upgraded replacement for the Dechang (德昌) Excel sheet, a DC motor calculation tool.

Engineers don't have to change how they work or their habits. Familiar recipe, familiar taste, and better motor designs with less effort! And the customer immediately sees the extra value you create for them!

  1. Performance calculation: fit a trend through 5 test points to draw the five-axis curves and calculate the key parameters. Compare old and new winding schemes to predict the new scheme's performance. Consistent with the original tool!

  2. Upgrades:

    1. Break down geographic barriers: local data becomes cloud data! PC files become HTML pages! Anyone with permission can view and edit, anytime, anywhere. The customer can see the effort you put in at any moment!

    2. Upgraded UX:

      1. Show the dyno data, performance curves, and data tables on one page, instead of constantly switching between sheets!

      2. Detailed, dynamic charts: hover the mouse over a point to see its parameters! No more rough eyeballing.

      3. (to be added): performance comparison of old, new, and multiple design schemes! Compare performance curves, key parameters, and data tables.

    3. Database management: from files to a database!

      1. From scattered documents to a four-level hierarchy of categories (product series, product, design scheme, dyno result) that keeps the performance data organized (sketched as data models below).

      2. Detailed metadata: creation time, creator, and sample number are all visible.

      3. All motor development projects listed in a single view! Everything under control.
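A possible shape for that four-level hierarchy as Django models (a sketch only; the model and field names are my guesses, not the actual AutoEM schema):

# Sketch: series -> product -> design scheme -> dyno result, with the metadata
# fields mentioned above. Names are hypothetical.
from django.db import models

class ProductSeries(models.Model):
    name = models.CharField(max_length=100)

class Product(models.Model):
    series = models.ForeignKey(ProductSeries, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)

class DesignScheme(models.Model):
    product = models.ForeignKey(Product, on_delete=models.CASCADE)
    name = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)  # metadata: creation time
    created_by = models.CharField(max_length=100)         # metadata: creator

class DynoResult(models.Model):
    scheme = models.ForeignKey(DesignScheme, on_delete=models.CASCADE)
    sample_no = models.CharField(max_length=50)           # metadata: sample number
    measured_at = models.DateTimeField(auto_now_add=True)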


n5321 | 2025-09-14 08:45

Programming as Theory Building

Peter Naur 1985

Peter Naur’s classic 1985 essay “Programming as Theory Building” argues that a program is not its source code. A program is a shared mental construct (he uses the word theory) that lives in the minds of the people who work on it. If you lose the people, you lose the program. The code is merely a written representation of the program, and it’s lossy, so you can’t reconstruct a program from its code.

Introduction

The present discussion is a contribution to the understanding of what programming is. It suggests that programming properly should be regarded as an activity by which the programmers form or achieve a certain kind of insight, a theory, of the matters at hand. This suggestion is in contrast to what appears to be a more common notion, that programming should be regarded as a production of a program and certain other texts.

Some of the background of the views presented here is to be found in certain observations of what actually happens to programs and the teams of programmers dealing with them, particularly in situations arising from unexpected and perhaps erroneous program executions or reactions, and on the occasion of modifications of programs. The difficulty of accommodating such observations in a production view of programming suggests that this view is misleading. The theory building view is presented as an alternative.

A more general background of the presentation is a conviction that it is important to have an appropriate understanding of what programming is. If our understanding is inappropriate we will misunderstand the difficulties that arise in the activity and our attempts to overcome them will give rise to conflicts and frustrations.

In the present discussion some of the crucial background experience will first be outlined. This is followed by an explanation of a theory of what programming is, denoted the Theory Building View. The subsequent sections enter into some of the consequences of the Theory Building View.

Programming and the Programmers’ Knowledge

I shall use the word programming to denote the whole activity of design and implementation of programmed solutions. What I am concerned with is the activity of matching some significant part and aspect of an activity in the real world to the formal symbol manipulation that can be done by a program running on a computer. With such a notion it follows directly that the programming activity I am talking about must include the development in time corresponding to the changes taking place in the real world activity being matched by the program execution, in other words program modifications.

One way of stating the main point I want to make is that programming in this sense primarily must be the programmers’ building up knowledge of a certain kind, knowledge taken to be basically the programmers’ immediate possession, any documentation being an auxiliary, secondary product.

As a background of the further elaboration of this view given in the following sections, the remainder of the present section will describe some real experience of dealing with large programs that has seemed to me more and more significant as I have pondered over the problems. In either case the experience is my own or has been communicated to me by persons having first hand contact with the activity in question.

Case 1 concerns a compiler. It has been developed by a group A for a Language L and worked very well on computer X. Now another group B has the task to write a compiler for a language L + M, a modest extension of L, for computer Y. Group B decides that the compiler for L developed by group A will be a good starting point for their design, and gets a contract with group A that they will get support in the form of full documentation, including annotated program texts and much additional written design discussion, and also personal advice. The arrangement was effective and group B managed to develop the compiler they wanted. In the present context the significant issue is the importance of the personal advice from group A in the matters that concerned how to implement the extensions M to the language. During the design phase group B made suggestions for the manner in which the extensions should be accommodated and submitted them to group A for review. In several major cases it turned out that the solutions suggested by group B were found by group A to make no use of the facilities that were not only inherent in the structure of the existing compiler but were discussed at length in its documentation, and to be based instead on additions to that structure in the form of patches that effectively destroyed its power and simplicity. The members of group A were able to spot these cases instantly and could propose simple and effective solutions, framed entirely within the existing structure. This is an example of how the full program text and additional documentation is insufficient in conveying to even the highly motivated group B the deeper insight into the design, that theory which is immediately present to the members of group A.

In the years following these events the compiler developed by group B was taken over by other programmers of the same organization, without guidance from group A. Information obtained by a member of group A about the compiler resulting from the further modification of it after about 10 years made it clear that at that later stage the original powerful structure was still visible, but made entirely ineffective by amorphous additions of many different kinds. Thus, again, the program text and its documentation has proved insufficient as a carrier of some of the most important design ideas.

Case 2 concerns the installation and fault diagnosis of a large real–time system for monitoring industrial production activities. The system is marketed by its producer, each delivery of the system being adapted individually to its specific environment of sensors and display devices. The size of the program delivered in each installation is of the order of 200,000 lines. The relevant experience from the way this kind of system is handled concerns the role and manner of work of the group of installation and fault finding programmers. The facts are, first that these programmers have been closely concerned with the system as a full time occupation over a period of several years, from the time the system was under design. Second, when diagnosing a fault these programmers rely almost exclusively on their ready knowledge of the system and the annotated program text, and are unable to conceive of any kind of additional documentation that would be useful to them. Third, other programmers’ groups who are responsible for the operation of particular installations of the system, and thus receive documentation of the system and full guidance on its use from the producer’s staff, regularly encounter difficulties that upon consultation with the producer’s installation and fault finding programmer are traced to inadequate understanding of the existing documentation, but which can be cleared up easily by the installation and fault finding programmers.

The conclusion seems inescapable that at least with certain kinds of large programs, the continued adaption, modification, and correction of errors in them, is essentially dependent on a certain kind of knowledge possessed by a group of programmers who are closely and continuously connected with them.

Ryle’s Notion of Theory

If it is granted that programming must involve, as the essential part, a building up of the programmers’ knowledge, the next issue is to characterize that knowledge more closely. What will be considered here is the suggestion that the programmers’ knowledge properly should be regarded as a theory, in the sense of Ryle. Very briefly, a person who has or possesses a theory in this sense knows how to do certain things and in addition can support the actual doing with explanations, justifications, and answers to queries, about the activity of concern. It may be noted that Ryle’s notion of theory appears as an example of what K. Popper calls unembodied World 3 objects and thus has a defensible philosophical standing. In the present section we shall describe Ryle’s notion of theory in more detail.

Ryle develops his notion of theory as part of his analysis of the nature of intellectual activity, particularly the manner in which intellectual activity differs from, and goes beyond, activity that is merely intelligent. In intelligent behaviour the person displays, not any particular knowledge of facts, but the ability to do certain things, such as to make and appreciate jokes, to talk grammatically, or to fish. More particularly, the intelligent performance is characterized in part by the person’s doing them well, according to certain criteria, but further displays the person’s ability to apply the criteria so as to detect and correct lapses, to learn from the examples of others, and so forth. It may be noted that this notion of intelligence does not rely on any notion that the intelligent behaviour depends on the person’s following or adhering to rules, prescriptions, or methods. On the contrary, the very act of adhering to rules can be done more or less intelligently; if the exercise of intelligence depended on following rules there would have to be rules about how to follow rules, and about how to follow the rules about following rules, etc. in an infinite regress, which is absurd.

What characterizes intellectual activity, over and beyond activity that is merely intelligent, is the person’s building and having a theory, where theory is understood as the knowledge a person must have in order not only to do certain things intelligently but also to explain them, to answer queries about them, to argue about them, and so forth. A person who has a theory is prepared to enter into such activities; while building the theory the person is trying to get it.

The notion of theory in the sense used here applies not only to the elaborate constructions of specialized fields of enquiry, but equally to activities that any person who has received education will participate in on certain occasions. Even quite unambitious activities of everyday life may give rise to people’s theorizing, for example in planning how to place furniture or how to get to some place by means of certain means of transportation.

The notion of theory employed here is explicitly not confined to what may be called the most general or abstract part of the insight. For example, to have Newton’s theory of mechanics as understood here it is not enough to understand the central laws, such as that force equals mass times acceleration. In addition, as described in more detail by Kuhn, the person having the theory must have an understanding of the manner in which the central laws apply to certain aspects of reality, so as to be able to recognize and apply the theory to other similar aspects. A person having Newton’s theory of mechanics must thus understand how it applies to the motions of pendulums and the planets, and must be able to recognize similar phenomena in the world, so as to be able to employ the mathematically expressed rules of the theory properly.

The dependence of a theory on a grasp of certain kinds of similarity between situations and events of the real world gives the reason why the knowledge held by someone who has the theory could not, in principle, be expressed in terms of rules. In fact, the similarities in question are not, and cannot be, expressed in terms of criteria, no more than the similarities of many other kinds of objects, such as human faces, tunes, or tastes of wine, can be thus expressed.

The Theory To Be Built by the Programmer

In terms of Ryle’s notion of theory, what has to be built by the programmer is a theory of how certain affairs of the world will be handled by, or supported by, a computer program. On the Theory Building View of programming the theory built by the programmers has primacy over such other products as program texts, user documentation, and additional documentation such as specifications.

In arguing for the Theory Building View, the basic issue is to show how the knowledge possessed by the programmer by virtue of his or her having the theory necessarily, and in an essential manner, transcends that which is recorded in the documented products. The answer to this issue is that the programmer's knowledge transcends that given in documentation in at least three essential areas:

1) The programmer having the theory of the program can explain how the solution relates to the affairs of the world that it helps to handle. Such an explanation will have to be concerned with the manner in which the affairs of the world, both in their overall characteristics and their details, are, in some sense, mapped into the program text and into any additional documentation. Thus the programmer must be able to explain, for each part of the program text and for each of its overall structural characteristics, what aspect or activity of the world is matched by it. Conversely, for any aspect or activity of the world the programmer is able to state its manner of mapping into the program text. By far the largest part of the world aspects and activities will of course lie outside the scope of the program text, being irrelevant in the context. However, the decision that a part of the world is relevant can only be made by someone who understands the whole world. This understanding must be contributed by the programmer.

2) The programmer having the theory of the program can explain why each part of the program is what it is, in other words is able to support the actual program text with a justification of some sort. The final basis of the justification is and must always remain the programmer’s direct, intuitive knowledge or estimate. This holds even where the justification makes use of reasoning, perhaps with application of design rules, quantitative estimates, comparisons with alternatives, and such like, the point being that the choice of the principles and rules, and the decision that they are relevant to the situation at hand, again must in the final analysis remain a matter of the programmer’s direct knowledge.

3) The programmer having the theory of the program is able to respond constructively to any demand for a modification of the program so as to support the affairs of the world in a new manner. Designing how a modification is best incorporated into an established program depends on the perception of the similarity of the new demand with the operational facilities already built into the program. The kind of similarity that has to be perceived is one between aspects of the world. It only makes sense to the agent who has knowledge of the world, that is to the programmer, and cannot be reduced to any limited set of criteria or rules, for reasons similar to the ones given above why the justification of the program cannot be thus reduced.

While the discussion of the present section presents some basic arguments for adopting the Theory Building View of programming, an assessment of the view should take into account to what extent it may contribute to a coherent understanding of programming and its problems. Such matters will be discussed in the following sections.

Problems and Costs of Program Modifications

A prominent reason for proposing the Theory Building View of programming is the desire to establish an insight into programming suitable for supporting a sound understanding of program modifications. This question will therefore be the first one to be taken up for analysis.

One thing seems to be agreed by everyone, that software will be modified. It is invariably the case that a program, once in operation, will be felt to be only part of the answer to the problems at hand. Also the very use of the program itself will inspire ideas for further useful services that the program ought to provide. Hence the need for ways to handle modifications.

The question of program modifications is closely tied to that of programming costs. In the face of a need for a changed manner of operation of the program, one hopes to achieve a saving of costs by making modifications of an existing program text, rather than by writing an entirely new program.

The expectation that program modifications at low cost ought to be possible is one that calls for closer analysis. First it should be noted that such an expectation cannot be supported by analogy with modifications of other complicated man–made constructions. Where modifications are occasionally put into action, for example in the case of buildings, they are well known to be expensive and in fact complete demolition of the existing building followed by new construction is often found to be preferable economically. Second, the expectation of the possibility of low cost program modifications conceivably finds support in the fact that a program is a text held in a medium allowing for easy editing. For this support to be valid it must clearly be assumed that the dominating cost is one of text manipulation. This would agree with a notion of programming as text production. On the Theory Building View this whole argument is false. This view gives no support to an expectation that program modifications at low cost are generally possible.

A further closely related issue is that of program flexibility. In including flexibility in a program we build into the program certain operational facilities that are not immediately demanded, but which are likely to turn out to be useful. Thus a flexible program is able to handle certain classes of changes of external circumstances without being modified.

It is often stated that programs should be designed to include a lot of flexibility, so as to be readily adaptable to changing circumstances. Such advice may be reasonable as far as flexibility that can be easily achieved is concerned. However, flexibility can in general only be achieved at a substantial cost. Each item of it has to be designed, including what circumstances it has to cover and by what kind of parameters it should be controlled. Then it has to be implemented, tested, and described. This cost is incurred in achieving a program feature whose usefulness depends entirely on future events. It must be obvious that built–in program flexibility is no answer to the general demand for adapting programs to the changing circumstances of the world.

In a program modification an existing programmed solution has to be changed so as to cater for a change in the real world activity it has to match. What is needed in a modification, first of all, is a confrontation of the existing solution with the demands called for by the desired modification. In this confrontation the degree and kind of similarity between the capabilities of the existing solution and the new demands has to be determined. This need for a determination of similarity brings out the merit of the Theory Building View. Indeed, precisely in a determination of similarity the shortcoming of any view of programming that ignores the central requirement for the direct participation of persons who possess the appropriate insight becomes evident. The point is that the kind of similarity that has to be recognized is accessible to the human beings who possess the theory of the program, although entirely outside the reach of what can be determined by rules, since even the criteria on which to judge it cannot be formulated. From the insight into the similarity between the new requirements and those already satisfied by the program, the programmer is able to design the change of the program text needed to implement the modification.

In a certain sense there can be no question of a theory modification, only of a program modification. Indeed, a person having the theory must already be prepared to respond to the kinds of questions and demands that may give rise to program modifications. This observation leads to the important conclusion that the problems of program modification arise from acting on the assumption that programming consists of program text production, instead of recognizing programming as an activity of theory building.

On the basis of the Theory Building View the decay of a program text as a result of modifications made by programmers without a proper grasp of the underlying theory becomes understandable. As a matter of fact, if viewed merely as a change of the program text and of the external behaviour of the execution, a given desired modification may usually be realized in many different ways, all correct. At the same time, if viewed in relation to the theory of the program these ways may look very different, some of them perhaps conforming to that theory or extending it in a natural way, while others may be wholly inconsistent with that theory, perhaps having the character of unintegrated patches on the main part of the program. This difference of character of various changes is one that can only make sense to the programmer who possesses the theory of the program. At the same time the character of changes made in a program text is vital to the longer term viability of the program. For a program to retain its quality it is mandatory that each modification is firmly grounded in the theory of it. Indeed, the very notion of qualities such as simplicity and good structure can only be understood in terms of the theory of the program, since they characterize the actual program text in relation to such program texts that might have been written to achieve the same execution behaviour, but which exist only as possibilities in the programmer’s understanding.

Program Life, Death, and Revival

A main claim of the Theory Building View of programming is that an essential part of any program, the theory of it, is something that could not conceivably be expressed, but is inextricably bound to human beings. It follows that in describing the state of the program it is important to indicate the extent to which programmers having its theory remain in charge of it. As a way in which to emphasize this circumstance one might extend the notion of program building by notions of program life, death, and revival. The building of the program is the same as the building of the theory of it by and in the team of programmers. During the program life a programmer team possessing its theory remains in active control of the program, and in particular retains control over all modifications. The death of a program happens when the programmer team possessing its theory is dissolved. A dead program may continue to be used for execution in a computer and to produce useful results. The actual state of death becomes visible when demands for modifications of the program cannot be intelligently answered. Revival of a program is the rebuilding of its theory by a new programmer team.

The extended life of a program according to these notions depends on the taking over by new generations of programmers of the theory of the program. For a new programmer to come to possess an existing theory of a program it is insufficient that he or she has the opportunity to become familiar with the program text and other documentation. What is required is that the new programmer has the opportunity to work in close contact with the programmers who already possess the theory, so as to be able to become familiar with the place of the program in the wider context of the relevant real world situations and so as to acquire the knowledge of how the program works and how unusual program reactions and program modifications are handled within the program theory. This problem of education of new programmers in an existing theory of a program is quite similar to that of the educational problem of other activities where the knowledge of how to do certain things dominates over the knowledge that certain things are the case, such as writing and playing a musical instrument. The most important educational activity is the student's doing the relevant things under suitable supervision and guidance. In the case of programming the activity should include discussions of the relation between the program and the relevant aspects and activities of the real world, and of the limits set on the real world matters dealt with by the program.

A very important consequence of the Theory Building View is that program revival, that is reestablishing the theory of a program merely from the documentation, is strictly impossible. Lest this consequence may seem unreasonable it may be noted that the need for revival of an entirely dead program probably will rarely arise, since it is hardly conceivable that the revival would be assigned to new programmers without at least some knowledge of the theory had by the original team. Even so the Theory Building View suggests strongly that program revival should only be attempted in exceptional situations and with full awareness that it is at best costly, and may lead to a revived theory that differs from the one originally had by the program authors and so may contain discrepancies with the program text.

In preference to program revival, the Theory Building View suggests, the existing program text should be discarded and the new–formed programmer team should be given the opportunity to solve the given problem afresh. Such a procedure is more likely to produce a viable program than program revival, and at no higher, and possibly lower, cost. The point is that building a theory to fit and support an existing program text is a difficult, frustrating, and time consuming activity. The new programmer is likely to feel torn between loyalty to the existing program text, with whatever obscurities and weaknesses it may contain, and the new theory that he or she has to build up, and which, for better or worse, most likely will differ from the original theory behind the program text.

Similar problems are likely to arise even when a program is kept continuously alive by an evolving team of programmers, as a result of the differences of competence and background experience of the individual programmers, particularly as the team is being kept operational by inevitable replacements of the individual members.

Method and Theory Building

Recent years have seen much interest in programming methods. In the present section some comments will be made on the relation between the Theory Building View and the notions behind programming methods.

To begin with, what is a programming method? This is not always made clear, even by authors who recommend a particular method. Here a programming method will be taken to be a set of work rules for programmers, telling what kind of things the programmers should do, in what order, which notations or languages to use, and what kinds of documents to produce at various stages.

In comparing this notion of method with the Theory Building View of programming, the most important issue is that of actions or operations and their ordering. A method implies a claim that program development can and should proceed as a sequence of actions of certain kinds, each action leading to a particular kind of documented result. In building the theory there can be no particular sequence of actions, for the reason that a theory held by a person has no inherent division into parts and no inherent ordering. Rather, the person possessing a theory will be able to produce presentations of various sorts on the basis of it, in response to questions or demands.

As to the use of particular kinds of notation or formalization, again this can only be a secondary issue since the primary item, the theory, is not, and cannot be, expressed, and so no question of the form of its expression arises.

It follows that on the Theory Building View, for the primary activity of the programming there can be no right method.

This conclusion may seem to conflict with established opinion, in several ways, and might thus be taken to be an argument against the Theory Building View. Two such apparent contradictions shall be taken up here, the first relating to the importance of method in the pursuit of science, the second concerning the success of methods as actually used in software development.

The first argument is that software development should be based on scientific manners, and so should employ procedures similar to scientific methods. The flaw of this argument is the assumption that there is such a thing as scientific method and that it is helpful to scientists. This question has been the subject of much debate in recent years, and the conclusion of such authors as Feyerabend, taking his illustrations from the history of physics, and Medawar, arguing as a biologist, is that the notion of scientific method as a set of guidelines for the practising scientist is mistaken.

This conclusion is not contradicted by such work as that of Polya on problem solving. This work takes its illustrations from the field of mathematics and leads to insight which is also highly relevant to programming. However, it cannot be claimed to present a method on which to proceed. Rather, it is a collection of suggestions aiming at stimulating the mental activity of the problem solver, by pointing out different modes of work that may be applied in any sequence.

The second argument that may seem to contradict the dismissal of method of the Theory Building View is that the use of particular methods has been successful, according to published reports. To this argument it may be answered that a methodically satisfactory study of the efficacy of programming methods so far never seems to have been made. Such a study would have to employ the well established technique of controlled experiments (cf. Brooks, 1980 or Moher and Schneider, 1982). The lack of such studies is explainable partly by the high cost that would undoubtedly be incurred in such investigations if the results were to be significant, partly by the problems of establishing in an operational fashion the concepts underlying what is called methods in the field of program development. Most published reports on such methods merely describe and recommend certain techniques and procedures, without establishing their usefulness or efficacy in any systematic way. An elaborate study of five different methods by C. Floyd and several co–workers concludes that the notion of methods as systems of rules that in an arbitrary context and mechanically will lead to good solutions is an illusion. What remains is the effect of methods in the education of programmers. This conclusion is entirely compatible with the Theory Building View of programming. Indeed, on this view the quality of the theory built by the programmer will depend to a large extent on the programmer’s familiarity with model solutions of typical problems, with techniques of description and verification, and with principles of structuring systems consisting of many parts in complicated interactions. Thus many of the items of concern of methods are relevant to theory building. Where the Theory Building View departs from that of the methodologists is on the question of which techniques to use and in what order. On the Theory Building View this must remain entirely a matter for the programmer to decide, taking into account the actual problem to be solved.

Programmers’ Status and the Theory Building View

The areas where the consequences of the Theory Building View contrast most strikingly with those of the more prevalent current views are those of the programmers’ personal contribution to the activity and of the programmers’ proper status.

The contrast between the Theory Building View and the more prevalent view of the programmers’ personal contribution is apparent in much of the common discussion of programming. As just one example, consider the study of modifiability of large software systems by Oskarsson. This study gives extensive information on a considerable number of modifications in one release of a large commercial system. The description covers the background, substance, and implementation, of each modification, with particular attention to the manner in which the program changes are confined to particular program modules. However, there is no suggestion whatsoever that the implementation of the modifications might depend on the background of the 500 programmers employed on the project, such as the length of time they have been working on it, and there is no indication of the manner in which the design decisions are distributed among the 500 programmers. Even so the significance of an underlying theory is admitted indirectly in statements such as that ‘decisions were implemented in the wrong block’ and in a reference to ‘a philosophy of AXE’. However, by the manner in which the study is conducted these admissions can only remain isolated indications.

More generally, much current discussion of programming seems to assume that programming is similar to industrial production, the programmer being regarded as a component of that production, a component that has to be controlled by rules of procedure and which can be replaced easily. Another related view is that human beings perform best if they act like machines, by following rules, with a consequent stress on formal modes of expression, which make it possible to formulate certain arguments in terms of rules of formal manipulation. Such views agree well with the notion, seemingly common among persons working with computers, that the human mind works like a computer. At the level of industrial management these views support treating programmers as workers of fairly low responsibility, and only brief education.

On the Theory Building View the primary result of the programming activity is the theory held by the programmers. Since this theory by its very nature is part of the mental possession of each programmer, it follows that the notion of the programmer as an easily replaceable component in the program production activity has to be abandoned. Instead the programmer must be regarded as a responsible developer and manager of the activity in which the computer is a part. In order to fill this position he or she must be given a permanent position, of a status similar to that of other professionals, such as engineers and lawyers, whose active contributions as employees of enterprises rest on their intellectual proficiency.

The raising of the status of programmers suggested by the Theory Building View will have to be supported by a corresponding reorientation of the programmer education. While skills such as the mastery of notations, data representations, and data processes, remain important, the primary emphasis would have to turn in the direction of furthering the understanding and talent for theory formation. To what extent this can be taught at all must remain an open question. The most hopeful approach would be to have the student work on concrete problems under guidance, in an active and constructive environment.

Conclusions

Accepting program modifications demanded by changing external circumstances to be an essential part of programming, it is argued that the primary aim of programming is to have the programmers build a theory of the way the matters at hand may be supported by the execution of a program. Such a view leads to a notion of program life that depends on the continued support of the program by programmers having its theory. Further, on this view the notion of a programming method, understood as a set of rules of procedure to be followed by the programmer, is based on invalid assumptions and so has to be rejected.


n5321 | 2025-08-16 12:33

Computer Experiments in Fluid Dynamics

by Francis H. Harlow and Jacob E. Fromm

Laboratorium voor Scheepsbouwkunde, Technische Hogeschool Delft
March 1965. Reprinted from Scientific American.

The fundamental behavior of fluids has traditionally been studied in tanks and wind tunnels. The capacities of the modern computer make it possible to do subtler experiments on the computer alone.

The natural philosophers of ancient Greece liked to do experiments in their heads. Centuries later Galileo developed the "thought" experiment into a fruitful method of inquiry and in our own time the method appealed strongly to such men as Albert Einstein and Enrico Fermi. Now the arrival of the modern electronic computer has made the method immensely more powerful and versatile. The computer makes it possible to simulate nature with numerical models and to investigate it in ways that have never been practicable before. Physical processes of enormous complexity are being examined minutely and with considerable realism. New hypotheses are being proved true or false. In physics, engineering, economics and even anthropology the computer has become a revolutionary tool.

One of the great attractions of experiment by computer is that it can avoid some of the uncertainties of measurement. Moreover, it provides a technique that can be classed as both theoretical and experimental. It is theoretical because it deals with abstract (that is, mathematical) statements of how things relate to one another. It is experimental because the computer is given only data specifying the initial state of a system and a set of rules for calculating its state at some time in the future. The computer worker has no more idea how this future state will unfold than has the traditional worker who conducts a comparable experiment in an actual laboratory.

To demonstrate the power of computer experiments we have chosen a single example involving the dynamic behavior of fluids. The particular experiment is a study of the flow of air past a rectangular rod.

At first thought the use of a computer for calculating this flow may seem to be a needlessly roundabout procedure. Would it not be simpler and more enlightening to put the rod in a wind tunnel and observe how air containing filaments of smoke flows around it? Actually it would not. For many of the questions to be investigated the physical experiment would be more complicated and costly, and it would not provide as much information as the experiment by computer.

For an example one can point to the problem of redesigning the Tacoma Narrows Bridge after it had been shaken to pieces by wind-induced vibrations soon after it was built. For the rebuilding of the bridge many elaborate models were made and tested again and again before a safe design was finally developed. Without doubt much of the cost and time spent on the problem could have been saved by computer calculations if the computers and appropriate numerical techniques had then been available. Experiments with numerical models can show the interaction of winds and a bridge in detail and produce answers in far less time than it takes to prepare a physical experiment.

The Soviet physicist A. A. Dorodnitsyn has remarked about such problems that computer calculation "can give a solution that is not only more rapid and cheaper but also more accurate" than the physical experiment itself.

Experimentation by computer also allows the investigation of many phenomena that are either inaccessible to direct study or involve factors that cannot be measured accurately. In the flow problem that we shall discuss, for example, it is difficult to measure directly in a wind tunnel the temperature distribution in the complicated downstream wake. Computer experiments, however, can yield a reliable description of the temperature distribution.

Another benefit of a computer experiment is that it usually affords far better control of the experimental conditions than is possible in a physical experiment. In wind tunnel studies, for instance, the experimenter must modify his interpretations to include the consideration of such effects as those due to the compressibility of the working fluid, variations in fluid viscosity and uncertainties in flow velocity. In a computer experiment such properties often can be excluded or included at will. Moreover, the computer program can isolate crucial features for examination, can eliminate irrelevant factors and can often assess the experimental uncertainties.

Finally, and most importantly, experiments by computer provide a test of the applicability of theory to the complicated phenomena under investigation. Do the equations of fluid dynamics really represent the correct theoretical description when applied to phenomena as complicated, say, as the oscillatory flow that develops in the wake of a retreating rectangular rod? For such problems the mathematician would like to obtain what he calls an analytical solution—the kind of exact solution that can be obtained by the processes of mathematical analysis. For problems in fluid dynamics, however, the necessary mathematical techniques for obtaining the complete solution have not yet been developed. The detailed results provided by a computer can actually help in the development of analytical solutions to the basic equations of fluid dynamics. Usually in the mathematical model of a complex problem some of the factors can only be approximated, and obtaining a realistic solution depends on finding out which features are crucial for a reasonable representation. With the help of computer experiments one tries to discover workable approximations that will simplify the mathematics needed to solve complicated problems—in this case a problem in oscillatory fluid flow.

The reader will find the "computer wind tunnel" experiment easier to follow if we consider briefly how a fluid behaves when it flows around a fixed object such as a rectangular rod.

At low speed the airflow is smooth and steady, a condition described as laminar flow. At a certain critical speed, which depends on the size of the rod, the laminar flow breaks down. For a rod one inch in height the critical speed in air is about one inch per second; the smaller the rod, the higher the speed at which turbulence begins. If the fluid is more viscous than air, laminar flow is more easily maintained and the critical speed for turbulence becomes higher.

Above the critical speed the airstream breaks up into vortices that are similar to the small whirlpools seen when a cup of coffee is stirred. These vortices are shed alternately from the top and bottom of the object placed in the airstream. This oscillating wake was first extensively studied by the aerodynamicist Theodor von Kármán and is known as a "von Kármán vortex street."

The oscillating wake sends out pulses that react back on the object itself. The vibration so produced is responsible for the sound made by a golf club swung rapidly through the air and for the whine of a ship's rigging in the wind. It was resonant vibration produced by the wind that caused the Tacoma Narrows Bridge to break and fall into the bay.

As the air speed increases, the vortices in the vortex street become more and more ragged and eventually break up into tiny eddies whose motion is almost entirely random. At this stage fully developed turbulence has been reached.

The known patterns of air motion past an object, then, give us certain definite phenomena to look for in the computer experiments. If the computer reproduces a vortex street and, at a later stage, turbulence, it will show that the theoretical understanding of fluid dynamics is accurate and therefore can be relied on to predict what will happen when a fluid flows past objects of various shapes and at various speeds.

To set up the calculational experiment we must first translate the physical situation into the language of numbers for the computer. For bookkeeping purposes the experimental area in the computer wind tunnel is divided into many square cells, which form the basic computing mesh. A typical mesh requires at least 49 cells in the direction of horizontal flow and 24 cells in the vertical dimension, for a total of 1,176 cells. Each cell must contain two numbers representing the components of average air velocity in two directions, together with other numbers representing such variable quantities as "vorticity," "stream function" and, if heat flow is desired, temperature as well. Finally, the computer must be supplied with a set of operating instructions, or "code," that spells out in detail exactly how the computer must manipulate every number in every cell in order to calculate how the flow configuration will change from instant to instant. It can require billions of mathematical operations and anywhere from a few minutes to a few hours of computing time to carry out the calculations needed to represent the flow of air for a time interval of several minutes. In our studies we have used either an IBM 704 or the somewhat faster machine, also built by the International Business Machines Corporation, known as Stretch.
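(A quick sketch of the bookkeeping just described, in present-day Python; the variable names are mine, not the article's:)

# Sketch: the basic computing mesh, 49 x 24 square cells, each holding two
# velocity components plus vorticity, stream function and, optionally, temperature.
import numpy as np

NX, NY = 49, 24
u = np.zeros((NX, NY))     # average velocity, horizontal component
v = np.zeros((NX, NY))     # average velocity, vertical component
vort = np.zeros((NX, NY))  # "vorticity"
psi = np.zeros((NX, NY))   # "stream function"
T = np.zeros((NX, NY))     # temperature, if heat flow is wanted

print(u.size)  # 1,176 cells in total, as in the article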

The actual development of a successful code is a time-consuming process and is carried out in three steps. The first involves the testing of detailed numerical methods and is strewn with pitfalls. It is no trick, for example, to invent methods that develop numerical instability: the computer results rapidly run askew and lead to complete nonsense. Like physical experiments, computer experiments are also subject to interference by gremlins. Just as the vibration of a motor may produce extraneous turbulence in a wind tunnel, so the numerical approximations fed into a computer may lead to equally unwanted "truncation turbulence."

The second step is to prepare a full-scale code. For our problem in fluid dynamics this required many months, most of them consumed by "debugging," or ferreting out errors in the step-by-step instructions. Such a code is written with sufficient generality so that it can be used to solve a wide variety of roughly similar problems. Thus a good code can be used for years and will often be a source of inspiration for workers in other laboratories.

The third step is to formulate the code in terms of a specific problem. In our oscillating-wake study an important part of the formulation was to determine the "initial" and "boundary" conditions. The initial condition describes the state of the air at the start of the calculation.

[Figure caption: turbulence progressively increases (four steps from left to right). Computational particles are introduced through a single cell (horizontal streaks), as though squirting a jet of colored water into a clear tank. The jet of air is unstable and soon breaks into expanding, irregular vortices like those exhibited by a plume of smoke. Similar but far more complex experiments can be used to test theories about aircraft jet engine noise suppression.]

We could have assumed, for example, that the air was at rest, corresponding to the condition in a real wind tunnel before the fan is turned on. We found that it was simpler, however, to start with the fluid behaving as if it were flowing past the rod in a simple laminar manner without viscosity.

The boundary conditions refer to what is happening at the edges of the computational mesh. Our decision was to have the top and bottom edges represent the walls of the wind tunnel and to have the left edge represent an air input of uniform flow. The right edge gave us more trouble, but we finally arranged for the fluid to flow out and back into the computing region in a way that created a minimum of mathematical disturbance.

The computing process itself can be compared to the making of a motion picture. Starting with the initial conditions prescribed for each of the 1,176 cells in "frame" No. 1, the computer follows the coded instructions to determine the conditions in each cell a brief instant of time later, thereby producing frame No. 2 of the film. Each successive frame is similarly generated on the basis of numerical data computed for the preceding frame. The fastest computer available to us, Stretch, can generate about 10 frames a minute. When the calculation has proceeded far enough, the results are gathered up for study.
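The movie analogy maps directly onto a time-stepping loop: frame No. 1 holds the initial conditions, and each pass over the mesh produces the next frame. A schematic sketch, assuming NumPy; the update rule below is a generic diffusion-like stand-in, not the authors' actual difference scheme:

import numpy as np

def next_frame(frame, dt=0.01):
    """Advance every cell one brief instant (a stand-in for the coded instructions)."""
    new = frame.copy()
    new[1:-1, 1:-1] += dt * (
        frame[2:, 1:-1] + frame[:-2, 1:-1] +
        frame[1:-1, 2:] + frame[1:-1, :-2] - 4.0 * frame[1:-1, 1:-1]
    )
    return new

frame = np.zeros((49, 24))     # frame No. 1: the prescribed initial conditions
frame[24, 12] = 1.0            # a disturbance in a single cell
movie = [frame]
for _ in range(100):           # each iteration generates one more frame of the film
    movie.append(next_frame(movie[-1]))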

The computer's results can be presented in any of several different forms. One form of print-out consists of all the numbers describing the flow in each frame. Usually this form of print-out is restricted to samplings taken at selected intervals, because the complete data for every one of the hundreds or thousands of cycles in an experiment would be far too much for an analyst to digest, to say nothing of storing the reams of paper. Sometimes the computer is programmed to print certain calculations that supply particular points of information, such as the amount of air drag caused by the obstacle at specific wind speeds. The most useful and popular type of print-out, however, is the actual plotting of the flow in pictorial form.

The computer itself can generate plots of the flow configurations and put them on film by means of a microfilm recorder. Several selected frames from such recordings, exactly as they came from the computer, are among the illustrations on this page and preceding pages of this article. The sequence of all the frames of an experiment, combined in a film strip and run through a motion picture projector, gives a very vivid picture of the development of vortices and other features as a fluid flows around an obstacle.

From the numbers describing the flow in each cell of the computing mesh, the computer generates streamlines that show both the direction and the speed of flow throughout the space. The speed is indicated by the spacing between the streamlines: where the lines are close together the flow is fast; where they are farther apart the flow is slower. The computer can show the streamline patterns in either of two ways: as if a camera were photographing a stream of air flowing past it or as if the camera were moving along with the stream. The latter view shows the pattern of vortices in clear detail.
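Because streamlines are level curves of the stream function, contouring it at equal increments reproduces the spacing-equals-speed effect described above. A small sketch, assuming plotly and an invented stream function (uniform flow plus a weak vortex):

import numpy as np
import plotly.graph_objects as go

x = np.linspace(0.0, 4.0, 49)
y = np.linspace(0.0, 2.0, 24)
X, Y = np.meshgrid(x, y)                                  # shape (24, 49)
psi = 1.0 * Y - 0.2 * np.log(np.hypot(X - 2.0, Y - 1.0) + 1e-9)

# Equally spaced psi levels: where the lines crowd together the flow is fast
fig = go.Figure(go.Contour(x=x, y=y, z=psi, ncontours=20,
                           contours_coloring="lines", showscale=False))
fig.show()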

The computer can even simulate the motion of markers often used to make flow visible, such as filaments of smoke in air or of dye in water. In the computer the markers consist of "computational particles." At certain desired points in the computation these particles are thrown in (the magic of the computer allows their creation anywhere at will) and thereafter they are carried along wherever the flow of air goes. Their paths of motion produce lines called streak lines. The streak lines generated by the computer give a remarkably faithful impression of the behavior of smoke or dye filaments. Perhaps the most striking of these computer constructions is the configuration of streak lines emerging from a jet: it looks like a filament of cigarette smoke.
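A streak line is nothing more than the trail of passive markers released from a fixed point and carried along by the local velocity. A minimal sketch, with an invented unsteady velocity field standing in for the computed cell velocities:

import numpy as np

def velocity(p, t):
    """Stand-in for the computed flow: uniform drift plus a traveling wave."""
    x, y = p
    return np.array([1.0, 0.3 * np.sin(2.0 * x - t)])

dt = 0.05
particles = []
for n in range(200):
    t = n * dt
    particles.append(np.array([0.0, 1.0]))                    # inject a marker at the source
    particles = [p + dt * velocity(p, t) for p in particles]  # carry every marker along
streak_line = np.array(particles)  # the marker positions trace the streak line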

Usually the computer is programmed to furnish several different configuration plots, showing features of the flow from various points of view. These are by no means merely an interesting album of pictures. They show the qualitative features of the development of the flow and provide precise quantitative information about the flow at every point. In many cases the computer reveals important details that would be extremely difficult to obtain from physical experiments.

The example we have described of a computer technique for investigating fluid flow is only one of many successful efforts that have been made to carry out complex experiments by computer. Other workers have used computers to tell in detail what is going on inside a nuclear reactor and to assess in an instant the progress of a rocket soaring into space. Tomorrow the computer may give accurate forecasts of the weather, of the future of the economy and of the state of man's health.


n5321 | 2025年8月9日 21:01

China's Finite Element Method


On the upper reaches of the Yellow River, the great river cuts through deep gorges, its waters surging and roaring like an angry lion. After flowing more than two thousand li, it abruptly makes a ninety-degree turn and heads west, a scene of matchless grandeur.

At that bend stands China's first gigawatt-class hydropower station, the Liujiaxia Hydropower Station, whose main structure was the first concrete dam over 100 meters tall built in China. Little known, however, is that behind its completion stood a group of Chinese computational mathematicians. Facing an urgent national need, they fought day and night for the design and construction of the dam, helping the power station finish on schedule.

In the 1950s and 1960s, computational mathematicians at the Chinese Academy of Sciences, led by Feng Kang (elected a CAS academician in 1980), drew inspiration from dam computations and, independently of the West, founded the mathematical theory of the finite element method, opening a new field of methods and theory for scientific and engineering computation.

To this day, the finite element method remains one of the core technologies of R&D- and design-oriented industrial software. Bridges, tunnels, and dams; aircraft and ships; mobile phone chips: the birth of one complex artifact after another depends on the support of the finite element method.

Today a group of young people at the Academy of Mathematics and Systems Science, Chinese Academy of Sciences (hereafter AMSS), standing on the shoulders of their predecessors, is striving to build the computational kernel of a new generation of basic industrial software, setting sail on a new journey of industrialization.

1 Germination: coming to the aid of Liujiaxia

1958. Gansu Province, on the upper Yellow River.

Construction began on the gigawatt-class Liujiaxia Hydropower Station, a major national project whose main structure was a large concrete dam over 100 meters tall. It was the first time China had independently designed, engineered, and built a hydropower project of such scale. If completed, the surging Yellow River would "rest" here, providing power generation, irrigation, and flood control for the benefit of the local people.

The engineering difficulty exceeded all expectations, and both the design and the construction methods differed from anything done before. Progress on the dam was slow, and in 1961 work even came to a halt for a time.

The dam-computation task was first taken up by Huang Hongci and colleagues in Group 2 of the Third Division of the CAS Institute of Computing Technology (spun off in 1956 from the former CAS Institute of Mathematics; the predecessor of today's Institute of Computational Mathematics and Scientific/Engineering Computing at AMSS, hereafter "the Third Division"), with Feng Kang providing technical guidance. The programs Huang Hongci and his colleagues wrote were of very high quality and laid a solid foundation for the dam computations.

In February 1963, just after the Lunar New Year and still in the cold of winter, Zhu Zhaojun, deputy head of the Liujiaxia dam design group, arrived in Beijing, travel-worn, to seek help with the dam's stress computations. After careful deliberation, Feng Kang and the other leaders of the Third Division assigned the task to Cui Junzhi. Cui was then 25, one year out of Northwestern Polytechnical University, sharing an office with Huang Hongci, Shi Zhongci, and others.

Solving an engineering problem with computational mathematics usually takes four steps: build a mathematical model, design an algorithm, implement it in a program, and run it on a machine. For a long time, the researchers were stuck on the computational method.

"They knew the trial-load method used in engineering, and the task given to me was to solve the linear system of order 40 derived from it, so as to verify the method's correctness." Cui Junzhi, elected an academician of the Chinese Academy of Engineering in 1995, recalled that despite enormous effort and repeated checking, he found that the coefficient matrix of the linear system produced by that method was nearly singular and hardly solvable numerically. He also tried Huang Hongci's stress-function program and other methods, but none yielded a stress field that satisfied the engineers.

One main cause of the difficulty was insufficient computing power. Cui recalled that China's two earliest computers, the "103" and the "104", had only recently been built. He had to solve discretized equations with more than 1,000 unknowns on the "104", a machine with a 39-bit word length, 4K capacity, a speed of ten thousand operations per second, and a core memory of only 2,048 full words.

The Liujiaxia engineers pinned their last hopes on the CAS researchers, begging them to "find a way, whatever it takes."

"When you cannot meet the user's requirements, it is time to strike out on a new path. We began searching for a new method," Cui said.

Feng Kang (left) with Cui Junzhi.

2 Turning point: joining forces to weather the crisis

In the early 1960s, answering the call to "march toward science", the Chinese Academy of Sciences proposed the "tasks drive disciplines" research model: take major national-need tasks as the goal, carry out systematic research, and solve the scientific problems arising in national development.

With this heavy responsibility on its shoulders, accurately computing the dam's stress field to the engineers' satisfaction became the Third Division's principal task.

The turning point came from a paper and a book recommended by Feng Kang.

Cui still remembers Feng Kang mentioning, in a lecture, the 1947 paper by Prager and Synge in the Quarterly of Applied Mathematics, which cast differential equations in variational form and used variational principles to derive difference schemes, an idea that offered inspiration for the dam computations. "Mr. Feng was in fact sowing the seeds of the finite element method; that lecture opened the curtain on the Third Division's systematic research," Cui said.

At the same time, Feng Kang championed the creation of a seventh research group, the theory group, with Huang Hongci as its head. Feng recommended to Huang the book Finite-Difference Methods for Partial Differential Equations by Forsythe and Wasow, which focused on numerical methods for the three classes of partial differential equations and gave Huang important inspiration.

Under Feng Kang's planning, the "dam computation" group split into three teams, attacking the problem from the angles of the variational method, the integral conservation method, and removal of the dam-body foundation.

Cui was on the integral-conservation team; that method starts from the equilibrium equations and substitutes the stress-strain relations into the Lamé equations. After a stretch of complex and arduous computation, they obtained their first batch of results before the 1964 Spring Festival. Having verified the correctness of the scheme, the solver, and the program, Cui soon computed a stress field for the Liujiaxia dam. Checked by Zhu Zhaojun and others, the stresses were essentially in balance and the results fairly satisfactory, but the errors near the dam heel and dam toe were still large; the accuracy of the stress field was not yet sufficient.

Cui then threw himself into writing China's first finite element program for plane elasticity problems, and with it successfully computed a stress field that satisfied the designers.

Writing programs was extremely difficult in that era. The "104" had no operating system, no compiler, no data or process management software; every program, including data input and result printing, had to be written directly in machine instructions as strings of code.

Within a memory of only 2,048 storage cells, Cui had to solve an algebraic system of roughly order 1,000. About 500 cells had to be set aside for the program itself, leaving him to solve the system within the remaining roughly 1,500 cells. This demanded meticulous budgeting and a carefully designed iterative algorithm.
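The memory arithmetic becomes vivid if one recalls that an iterative scheme needs resident storage only for the solution (and right-hand side) when the matrix coefficients can be regenerated on the fly. A minimal sketch of that idea on a 1D model problem, assuming NumPy; this illustrates the constraint, not Cui Junzhi's actual algorithm:

import numpy as np

n = 1000                 # unknowns: about the order Cui had to handle
b = np.ones(n)           # right-hand side (here constant; could itself be generated)
x = np.zeros(n)          # the one O(n) array that must stay resident

# Gauss-Seidel sweeps for a tridiagonal model system (2 on the diagonal, -1 off
# the diagonal); the coefficients are recomputed each time, never stored as a matrix.
for sweep in range(200):
    for i in range(n):
        left = x[i - 1] if i > 0 else 0.0
        right = x[i + 1] if i < n - 1 else 0.0
        x[i] = (b[i] + left + right) / 2.0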

After round-the-clock debugging, error hunting, revision, and checking, Cui used his self-written finite element program to compute, at last, the stress fields for more than ten design schemes and their load cases for the Liujiaxia design group.

In mid-May 1964 the "Liujiaxia computation task review meeting" was held. Zhu Zhaojun and his colleagues, Huang Hongci, and others attended; Cui reported on the completion of the computing task, and Zhu praised the work highly.

In October 1966 news of victory came from the plateau: the Yellow River, charging like ten thousand galloping horses, was cut off at the waist by the towering dam and "locked" in the gorge, calm and gentle. From then on, the Liujiaxia Hydropower Station was the shining "pearl of the plateau".

After the dam's successful completion, the central government sent an open telegram commending the researchers for their outstanding contributions to the Liujiaxia project.

"Mr. Feng always took the long view, leading the development of computational mathematics and its applications," Cui said.

October 1978: Feng Kang (center) with Huang Hongci and AMSS researcher Zhang Guanquan.

3 First creation: independent of the West

Feng Kang was all along the de facto academic guide and pathfinder of the computational mathematics team. Because of him, the "seed" of the finite element method took root in the soil of the Liujiaxia dam and grew into a world-class academic achievement.

What exactly is a finite element? Feng Kang once put it vividly: "divide the whole into parts, straighten the curved, master the complex with the simple, turn the hard into the easy."

The finite element method is a special kind of "jigsaw puzzle": to solve a large, complex problem, say the structural analysis of a big building, first break it into many small pieces; these puzzle pieces are the "finite elements". Then analyze each element and set up its equations separately; finally assemble them into a system of equations and solve it, thereby solving the original problem.
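The puzzle in miniature: for -u'' = 1 on (0, 1) with u(0) = u(1) = 0, each linear element contributes a small 2-by-2 block of equations, and assembling the blocks yields the global system. A sketch assuming NumPy:

import numpy as np

n_el = 8                                           # the "puzzle pieces" (elements)
h = 1.0 / n_el
K = np.zeros((n_el + 1, n_el + 1))                 # global stiffness matrix
f = np.zeros(n_el + 1)                             # global load vector

k_el = (1.0 / h) * np.array([[1.0, -1.0], [-1.0, 1.0]])   # one element's equations
f_el = (h / 2.0) * np.array([1.0, 1.0])

for e in range(n_el):                              # assemble piece by piece
    idx = [e, e + 1]
    K[np.ix_(idx, idx)] += k_el
    f[idx] += f_el

K, f = K[1:-1, 1:-1], f[1:-1]                      # impose u(0) = u(1) = 0
u = np.linalg.solve(K, f)                          # nodal values of u(x) = x(1 - x)/2
print(u)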

In fact, the ancient Chinese story of Cao Chong weighing the elephant, and the mathematician Liu Hui's computation of the circumference of a circle by the method of inscribed polygons, are concrete embodiments of finite element thinking.

In 1943 the famous mathematician Courant published the world's first paper containing finite element ideas, but because computers did not yet exist, it failed to attract the attention it deserved.

More than 20 years later, with the rapid growth of aviation, complex structural analysis problems placed higher demands on computational methods, and the arrival of the computer made large, complex finite element calculations feasible. Boeing engineers Turner and Topp, civil engineering professor Clough, and aeronautical engineering professor Martin co-authored a paper in an aeronautical journal that used finite element techniques to compute the strength of an aircraft wing. This is generally regarded as the beginning of the finite element method in engineering.

The finite element method was first formally introduced by Clough in 1960 at an American Society of Civil Engineers computing conference; his paper on plane elasticity problems extended the method's range of application beyond aircraft to civil engineering.

But China was then cut off from the West, and Chinese mathematicians had little access to the frontier of finite element research.

"In the West the finite element method was proposed as an analysis method for structural engineering; in China it grew out of mathematics. China and the West developed the finite element method independently, along different paths," Cui said.

In 1965 Feng Kang published "Difference Schemes Based on the Variational Principle" in Applied Mathematics and Computational Mathematics: a family of numerical algorithms for solving partial differential equations, and none other than the celebrated finite element method.

In fact Huang Hongci, later a professor at Hong Kong Baptist University, had published China's first paper containing finite element ideas in 1963. But as he said on many occasions: "My paper already contained the most important principle of the proof; Mr. Feng obtained the most general conclusions under the broadest conditions, which is possible only on a deep mathematical foundation, and is therefore pioneering at a higher level. The West reached similar results only after 1969." And: "If the finite element method had not been treated the way the mathematicians treated it, its applications would have been severely limited, and it could never have attained the breadth of theoretical and practical attention it enjoys today."

"Difference Schemes Based on the Variational Principle" is both Feng Kang's enduring masterpiece and the mark of Chinese scholars' creation of finite element theory independently of the West.

After the reform and opening up, Feng Kang's paper was translated into English and became known to the world. Peter Lax, former science advisor to the US President and director of the Courant Institute of Mathematical Sciences at New York University, noted in a memorial article that Feng Kang "created the theory of the finite element method independently of the developments in applied mathematics in Western countries... making important contributions both to the implementation of the method and to the creation of its theoretical foundation."

The famous mathematician Shing-Tung Yau observed in 1998: "There are three achievements of modern Chinese mathematics that surpassed the West or stood shoulder to shoulder with it: Chern Shiing-Shen's characteristic classes, Hua Loo-Keng's several complex variables, and Feng Kang's finite element computation." Internationally renowned mathematicians such as Babuška and Lions also gave the work high praise in their writings.

Such assessments were soon echoed by many international peers. That enduring paper was like a torch in the dark night, guiding and warming that group of young people in their thirties.

The Third Division on Zhongguancun South Street in Beijing soon grew lively, with a steady stream of letters and visitors. To introduce the finite element method comprehensively, Feng Kang, Cui Junzhi, and others ran a training course attended by nearly 300 people, including well-known scholars. Cui recalled that the course was highly influential and did much to promote the spread and application of the finite element method in China.

In 1982 Feng Kang, Huang Hongci, Wang Jinxian, and Cui Junzhi received the second prize of the National Natural Science Award for creating the finite element method independently of the West.

Feng Kang (first from right) at an academic lecture.

4 Inheritance: a breakout in industrial software kernels

Sixty years have now passed since the CAS computational mathematics team completed the Liujiaxia computing task. Computational mathematicians return to that history again and again, asking: why was it Feng Kang who created the finite element method independently of the West? And how can the spirit of the older generation of scientists be carried forward?

Cui explains Feng Kang's success this way: "He had deep interdisciplinary knowledge and a mode of academic thinking that integrated and crossed many disciplines. He could not only grasp deeply the physical models of scientific and engineering problems and see the feasible paths to solving them, but also survey the substance of different scientific and engineering problems and express them in highly abstract, rigorous mathematical form. Add to that his total devotion to research and a dedication that would not rest until the goal was reached, and he produced internationally pioneering results one after another."

Feng Kang died in 1993. The young researchers never had the chance to hear him in person, but they know the story of Feng Kang and the Liujiaxia computations by heart and draw powerful inspiration from it.

With the rapid development of computers, software based on the finite element method has become the core of computer-aided engineering (CAE), the principal software aiding the development of modern engineering and equipment. CAE software performs computational analysis of the function, performance, safety, and reliability of engineering systems and equipment, and simulates future systems to establish their usability and reliability.

CAE combined with computer-aided design (CAD) and computer-aided manufacturing (CAM) is abbreviated CAX. CAX is the cornerstone of modern industry, the soul of intelligent manufacturing, and a foundational strategic support for China's rise as a manufacturing power.

In July 2023, with support from a CAS Strategic Priority Research Program, AMSS established the Basic Software Research Center to attack key scientific problems around the computational methods and mathematical theory of integrated CAX industrial software.

Young AMSS researchers Cui Tao, Jia Xiaohong, and others formed the "Feng Kang CAX Youth Shock Brigade", taking up their predecessors' mantle to build the mathematical kernel of a new generation of CAX basic industrial software with independent Chinese intellectual property.

The Basic Software Research Center entrusts major responsibilities to young people whose average age is under 40, and its evaluation standard is no longer how many papers are published but the delivery of "solid, usable" algorithms and software.

This is no smooth road. But as the new generation sees it, "though the road is long, walking brings us there; though the task is hard, doing gets it done."

The finite element method that Feng Kang pioneered is now incubating new groundbreaking, leading results, bursting with fresh and boundless possibility.

January 2024: the CAX integrated algorithm development and verification platform is released. Photo courtesy of AMSS.


n5321 | 2025年7月31日 23:53

Thirty years of development and application of CFD at Boeing Commercial Airplanes,

Published in 2004: thirty years of CFD development and application at Boeing Commercial Airplanes.

From 1973 to 2004, Boeing built hundreds of billions of dollars' worth of airplanes. Over those 30 years, the tools Boeing's engineers used had to be able to predict and confirm an airplane's flight characteristics accurately. Before 1973 those tools consisted of analytic approximations, wind tunnel tests, and flight tests. During these three decades, CFD joined them.

This short piece covers the acquisition, development, and application of CFD at Boeing in Seattle.

Introduction

In 1973, Boeing Commercial ran an estimated 100 to 200 CFD analyses. By 2002 the number exceeded 20,000, and those 20,000 cases involved far more complex situations as well. Why? The reasons:

  1. CFD is now acknowledged to provide substantial value and has created a paradigm shift in the vehicle design, analysis, and support processes;

  2. Boeing's CFD effort was led by a strong and capable visionary, Dr. Paul Rubbert, who recruited and was supported by many talented managers and technical people;

  3. the CFD effort was highly diversified, spanning algorithm research, code development, application and validation studies, process improvement, and user support;

  4. Boeing developed a broad product line, supported by many innovative and demanding project engineers;

  5. computing power and affordability improved by three to four orders of magnitude;

  6. numerous pioneers in academia and government kept making algorithmic breakthroughs;

  7. funding managers at Boeing and in the Government were not averse to taking risks.

The role and value of CFD

The engineer's goal: predict and confirm flight characteristics. The traditional means: analytic methods, wind tunnel testing, flight testing. The new means is CFD: simulation and analysis with numerical algorithms. The value of CFD is that a small number of inexpensive simulations yields the "understanding" needed to complete a design. Concretely, CFD can be used in an "inverse design" or optimization mode, predicting the geometry changes needed to optimize certain flow characteristics or a payoff function (e.g., drag). It can analyze and extend experimental data (typically obtained by testing a reduced-scale model of the vehicle in a wind tunnel) to recover accurate characteristics of the full-scale airplane. And it can help engineers find the root cause of a failed design.
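The optimization mode is easy to picture as a loop: perturb a shape parameter, re-evaluate the payoff function, step downhill. A toy sketch in Python in which the "CFD solve" is replaced by a made-up drag model with a known optimum; every name and number here is invented for illustration:

def drag(thickness):
    """Stand-in for a CFD solve: returns drag for one shape parameter."""
    return 0.02 + (thickness - 0.12) ** 2     # fictitious model with optimum at 0.12

t, step, eps = 0.20, 0.5, 1e-4
for _ in range(100):
    grad = (drag(t + eps) - drag(t - eps)) / (2 * eps)   # finite-difference gradient
    t -= step * grad                                     # geometry update
print(f"optimized thickness parameter: {t:.4f}")         # converges toward 0.12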

Effective use of CFD is a key ingredient in the successful design of modern commercial aircraft.

Intelligent, pervasive, and careful use of CFD is a major strategy of Boeing's product development. Experience to date at Boeing Commercial Airplanes has shown that CFD has had its greatest effect in the aerodynamic design of the high-speed cruise configuration of a transport aircraft.

Experience shows that CFD has played a crucial role in Boeing's airplane design. Using CFD in airplane development has saved Boeing tens of millions of dollars over the past 20 years. Tens of millions sounds like a lot, but it is only a small fraction of the value CFD created for Boeing. The larger share is the added value CFD gives the airplane itself. Value to the airline customer is what sells airplanes!

Value is added to the airplane product by achieving design solutions that are otherwise unreachable during the fast-paced development of a new airplane. Value is added by shortening the design development process. Time to market is critical in the commercial world, particularly when starting after a competitor has committed a similar product to market. Very important in the commercial world is getting it right the first time. No prototypes are built. From first flight to revenue service is frequently less than one year! Any deficiencies discovered during flight test must be rectified sufficiently for government certification and acceptance by the airline customer based on a schedule set years before. Any delays in meeting this schedule may result in substantial penalties and jeopardize future market success. The added value to the airplane product will produce increased sales and may even open up completely new markets. The result is more profit to both the buyer and seller (who does not have to discount the product as much to make the sale). All this translates into greater market share.

The commercial value is spelled out in detail above.

The CFD development and application process

In industry, CFD has no value of its own. The only way CFD can deliver value is for it to affect the product. CFD must become an integral part of the engineering processes for designing, manufacturing, and supporting the product; it must get into the hands of the engineers who execute those processes. That is the ideal.

The CFD developers and ‘‘expert’’ users can certainly contribute, but are only a part of the engineering process.

Getting CFD into "production" use is no small feat; it is frequently a multiyear process.

The CFD development process falls into five distinct phases:

  1. Phase I develops enabling-technology algorithms that provide a basic means of solving a given problem.

  2. Phase II is the initial exploration, validation, and demonstration of the new computational technology. Its main outputs are demonstrator codes, useful for computational experiments and demos, combined with a vision of what is really needed.

  3. Phase III supplies the substance of that vision, usually entailing a generalization or other modification of the Phase II code (perhaps a complete rewrite), coupled with front- and back-end interfaces, to produce user-friendly, well-understood, and maintainable software. Even then, users have yet to gain enough confidence to make important, standalone decisions based on the code. That takes time, exposure, and experience.

  4. Phase IV involves "applications research": design engineers, managers, and code developers work together to learn how the new capability will enter into and change the aerodynamic design process. This is where the software lands in real use.

  5. Phase V: a mature capability. A code usually takes a considerable time to reach Phase V maturity.

Forrester T. Johnson, Edward N. Tinoco, N. Jong Yu

Received 1 June 2004; accepted 18 June 2004; available online 26 February 2005.

Abstract

Over the last 30 years, Boeing has developed, manufactured, sold, and supported hundreds of billions of dollars worth of commercial airplanes. During this period, it has been absolutely essential that Boeing aerodynamicists have access to tools that accurately predict and confirm vehicle flight characteristics. Thirty years ago, these tools consisted almost entirely of analytic approximation methods, wind tunnel tests, and flight tests. With the development of increasingly powerful computers, numerical simulations of various approximations to the Navier–Stokes equations began supplementing these tools. Collectively, these numerical simulation methods became known as Computational Fluid Dynamics (CFD). This paper describes the chronology and issues related to the acquisition, development, and use of CFD at Boeing Commercial Airplanes in Seattle. In particular, it describes the evolution of CFD from a curiosity to a full partner with established tools in the design of cost-effective and high-performing commercial transports.


Contents

  1. Introduction
  2. The role and value of CFD
  3. The CFD development and application process
  4. Chronology of CFD capability and use
     4.1. Linear potential flow
          4.1.1. First generation methods––early codes
          4.1.2. First generation methods––TA230
          4.1.3. Second generation linear potential flow method––PANAIR/A502
     4.2. Full potential/coupled boundary layer methods
          4.2.1. A488/A411
          4.2.2. TRANAIR
          4.2.3. BLWF
     4.3. Euler/coupled boundary layer methods
     4.4. Navier–Stokes methods
          4.4.1. Structured grid codes––Zeus, TLNS3D/CFL3D, OVERFLOW
          4.4.2. Unstructured grid codes––Fluent, NSU2D/3D, CFD++
          4.4.3. Other Navier–Stokes codes
          4.4.4. Next generation Navier–Stokes codes
     4.5. Design and optimization methods
          4.5.1. A555, A619 inverse design codes
          4.5.2. TRANAIR optimization
  5. Conclusions

References

1. Introduction

In 1973, an estimated 100–200 computer runs simulating flows about vehicles were made at Boeing Commercial Airplanes, Seattle. In 2002, more than 20,000 CFD cases were run to completion. Moreover, these cases involved physics and geometries of far greater complexity. Many factors were responsible for such a dramatic increase: (1) CFD is now acknowledged to provide substantial value and has created a paradigm shift in the vehicle design, analysis, and support processes; (2) the CFD effort at Boeing was led by a strong and capable visionary, Dr. Paul Rubbert, who recruited and was supported by the services of a number of talented managers and technical people; (3) this CFD effort was well diversified, involving algorithm research, code development, application and validation studies, process improvement, and user support; (4) Boeing developed a broad line of products, supported by a number of innovative and demanding project engineers; (5) computing power and affordability improved by three to four orders of magnitude; (6) numerous pioneers in academia and the Government continued to make algorithmic breakthroughs; and (7) there were funding managers in Boeing and the Government who were not averse to taking risks.

It would be impossible to adequately address all these factors in this short paper. Consequently, we will concentrate on issues that were central to the efforts of the authors, who have been members of the CFD Development and Applications groups at Boeing, Seattle for more than 30 years. In Section 2, we describe the role and value of CFD as it has evolved over the last 30 years and as it may possibly evolve in the future. In Section 3, we describe the CFD development and application processes. In Section 4, we lay out a brief history of the codes and methods that were most heavily used at Boeing, Seattle, as well as some of the issues that lay behind their development. In Section 5, we draw some brief conclusions.

Finally, we note that CFD has had a long and distinguished history in many other parts of the Boeing Enterprise. That history would best be related by those intimately involved.

2. The role and value of CFD

The application of CFD today has revolutionized the process of aerodynamic design. CFD has joined the wind tunnel and flight test as primary tools of the trade [1–4]. Each has its strengths and limitations. Because of the tremendous cost involved in flight testing, modern aircraft development must focus instead on the use of CFD and the wind tunnel. The wind tunnel has the advantage of dealing with a ‘‘real’’ fluid and can produce global data over a far greater range of the flight envelope than can CFD. It is best suited for validation and database building within acceptable limits of a development program's cost and schedule. Historically, CFD has been considered unsuited for such a task. However, the wind tunnel typically does not produce data at flight Reynolds number, is subject to significant wall and mounting system corrections, and is not well suited to provide flow details. The strength of CFD is its ability to inexpensively produce a small number of simulations leading to understanding necessary for design. Of great utility in this connection is the fact that CFD can be used in an ‘‘inverse design’’ or optimization mode, predicting the necessary geometry shape changes to optimize certain flow characteristics or a payoff function (e.g., drag). Beyond this, CFD is heavily used to provide corrections for the extrapolation of data acquired experimentally (typically from testing a reduced scale model of the vehicle in a wind tunnel) to conditions that characterize the full-scale flight vehicle. Finally, CFD is used to provide understanding and insight as to the source of undesirable flight characteristics, whether they are observed in subscale model testing or in the full-scale configuration.

Effective use of CFD is a key ingredient in the successful design of modern commercial aircraft. The combined pressures of market competitiveness, dedication to the highest of safety standards, and desire to remain a profitable business enterprise all contribute to make intelligent, extensive, and careful use of CFD a major strategy for product development at Boeing.

Experience to date at Boeing Commercial Airplanes has shown that CFD has had its greatest effect in the aerodynamic design of the high-speed cruise configuration of a transport aircraft. The advances in computing technology over the years have allowed CFD methods to affect the solution of problems of greater and greater relevance to aircraft design, as illustrated in Figs. 1 and 2. Use of these methods allowed a more thorough aerodynamic design earlier in the development process, permitting greater concentration on operational and safety-related features.

The 777, being a new design, allowed designers substantial freedom to exploit the advances in CFD and aerodynamics. High-speed cruise wing design and propulsion/airframe integration consumed the bulk of the CFD applications. Many other features of the aircraft design were influenced by CFD. For example, CFD was instrumental in design of the fuselage. Once the body diameter was settled, CFD was used to design the cab. No further changes were necessary as a result of wind tunnel testing. In fact, the need for wind tunnel testing in future cab design was eliminated. Here, CFD augmented wind tunnel testing for aft body and wing/body fairing shape design. In a similar fashion, CFD augmented wind tunnel testing for the design of the flap support fairings. The wind tunnel was used to assess the resulting drag characteristics. CFD was used to identify prime locations for static source, sideslip ports, and angle-of-attack vanes for the air data system. CFD was used for design of the environmental control system (ECS) inlet and exhaust ports and to plan an unusual wind tunnel evaluation of the inlet. The cabin (pressurization) outflow valves were positioned with CFD. Although still in its infancy with respect to high-lift design, CFD did provide insight to high-lift concepts and was used to assess planform effects. The bulk of the high-lift design work, however, was done in the wind tunnel [5]. Another collaboration between the wind tunnel and CFD involved the use of CFD to determine and refine the corrections applied to the experimental data due to the presence of the wind tunnel walls and model mounting system.

The Next Generation 737-700/600/800/900 (illustrated in Fig. 2), being a derivative of earlier 737s, presented a much more constrained design problem. Again the bulk of the CFD focused on cruise wing design and engine/airframe integration. Although the wing was new, its design was still constrained by the existing wing-body intersection and by the need to maintain manual control of the ailerons in case of a complete hydraulic failure. As with the 777, CFD was used in conjunction with the wind tunnel in the design of the wing-body fairing, modifications to the aft body, and design of the flap track fairings and the high-lift system.

Boeing Commercial Airplanes has leveraged academia- and NASA-developed CFD technology, some developed under contract by Boeing Commercial Airplanes, into engineering tools used in new airplane development. As a result of the use of these CFD tools, the number of wings designed and wind tunnel tested for high-speed cruise lines definition during an airplane development program has steadily decreased (Fig. 3). In recent years, the number of wings designed and tested is more a function of changing requirements during the development program and the need to support more extensive aerodynamic/structural trade studies during development. These advances in developing and using CFD tools for commercial airplane development have saved Boeing tens of millions of dollars over the past 20 years. However, as significant as these savings are, they are only a small fraction of the value CFD delivered to the company.

A much greater value of CFD in the commercial arena is the added value of the product (the airplane) due to the use of CFD. Value to the airline customer is what sells airplanes! Value is added to the airplane product by achieving design solutions that are otherwise unreachable during the fast-paced development of a new airplane. Value is added by shortening the design development process. Time to market is critical in the commercial world, particularly when starting after a competitor has committed a similar product to market. Very important in the commercial world is getting it right the first time. No prototypes are built. From first flight to revenue service is frequently less than one year! Any deficiencies discovered during flight test must be rectified sufficiently for government certification and acceptance by the airline customer based on a schedule set years before. Any delays in meeting this schedule may result in substantial penalties and jeopardize future market success. The added value to the airplane product will produce increased sales and may even open up completely new markets. The result is more profit to both the buyer and seller (who does not have to discount the product as much to make the sale). All this translates into greater market share.

CFD will continue to see an ever-increasing role in the aircraft development process as long as it continues to add value to the product from the customer's point of view. CFD has improved the quality of aerodynamic design, but has not yet had much effect on the rest of the overall airplane development process, as illustrated in Fig. 4. CFD is now becoming more interdisciplinary, helping provide closer ties between aerodynamics, structures, propulsion, and flight controls. This will be the key to more concurrent engineering, in which various disciplines will be able to work more in parallel rather than in the sequential manner as is today's practice. The savings due to reduced development flow time can be enormous!

To be able to use CFD in these multidisciplinary roles, considerable progress in algorithm and hardware technology is still necessary. Flight conditions of interest are frequently characterized by large regions of separated flows. For example, such flows are encountered on transports at low speed with deployed high-lift devices, at their structural design load conditions, or when transports are subjected to in-flight upsets that expose them to speed and/or angle of attack conditions outside the envelope of normal flight conditions. Such flows can only be simulated using the Navier–Stokes equations. Routine use of CFD based on Navier–Stokes formulations will require further improvements in turbulence models, algorithm, and hardware performance. Improvements in geometry and grid generation to handle complexity such as high-lift slats and flaps, deployed spoilers, deflected control surfaces, and so on, will also be necessary. However, improvements in CFD alone will not be enough. The process of aircraft development, itself, will have to change to take advantage of the new CFD capabilities.

3. The CFD development and application process

In industry, CFD has no value of its own. The only way CFD can deliver value is for it to affect the product. To affect the product, it must become an integral part of the engineering process for the design, manufacture, and support of the product. Otherwise, CFD is just an add-on; it may have some value but its effect is limited. To make CFD an integral part of the Product Development and Support engineering processes, it must get into the hands of the engineers who execute these processes. This is the only way the volume of analysis/design runs necessary to affect the product can be made. Moreover, it is in the Product Development and Support organizations that ownership of the CFD/engineering processes resides, and it is these processes that management relies on when investing billions of dollars in a new airplane development. The CFD developers and ‘‘expert’’ users can certainly contribute, but are only a part of the engineering process.

Getting CFD into ‘‘production’’ use is not trivial––it is frequently a multiyear process. There are five distinct phases in the CFD development process. These are illustrated in Fig. 5.

Phase I produces enabling technology algorithms that provide a basic means for solving a given problem. Phase II, which overlaps Phase I, constitutes the initial attempts to explore, validate, and demonstrate a new computational technology. There are some limited pioneering applications at this stage, but the emerging technology is not yet at a state that will produce significant payoff or impact because the technology is still subject to surprise. Hence, managers and design engineers are unwilling at this point to make important, standalone design decisions based on computed results. Such decisions by users do not happen until well into Phase IV.

Many of the code developments end in the middle of Phase II with a contractor report or scientific paper that proclaims, ‘‘Gee whiz, look what can be done.’’ For many codes, this is a good and natural transfer point for industry to assume responsibility for further development, because most of what must occur beyond this point will be unique to the particular needs of each individual industry organization. Of course, this implies that corporate managers must have the wisdom to understand what they must support to turn such a code into a mature and effective capability that will live up to the ‘‘Gee whiz’’ expectations. That requires the time and investment associated with Phases III and IV.

The main outputs of Phase II are demonstrator codes (useful for computational experiments and demonstrations) combined with a vision of what is really needed. Phase III is aimed at supplying the substance of that vision and usually entails a generalization or other modification of Phase II codes (perhaps complete rewrites) combined with a coupling of front- and back-end interfaces to produce user-friendly, well-understood, and maintainable software. Most commercially available (COTS) codes have reached this stage of development. But even at this stage, their contribution or effect on the corporate bottom line is still minimal because engineers and managers don't yet understand how the existence of this new tool will change the engineering process and what it will be used for. They have yet to gain enough confidence to make important, standalone decisions based on the code. That takes time, exposure, and experience.

In the fourth phase, the payoff or effect of a code grows rapidly. Phase IV entails ‘‘applications research,’’ where design engineers, management, and code developers work together to learn how this new capability will enter into and change the aerodynamic design process. The applications research endeavor requires people with broad backgrounds who can ask the right questions of the algorithm researchers, and code developers who can intelligently question experimental data when test-theory comparisons don't agree. Both must also be good physicists, for it is not unusual to find that the shortcomings lie neither in the experiment nor in the quality of the computations, but in the fact that the theoretical model assumed in the computations was not an adequate description of the real physics. Needs for code refinements that were not anticipated invariably surface during this phase, and these refinements often require more algorithm research, additional geometry preprocessors, and so on. Over time, the requests for additions or refinements diminish until the code settles down to occupy its proper niche in the toolbox, and design engineers and managers have learned the capabilities, limitations, and proper applications of this now-mature code. Without the investments in Phase IV, the enormous payoff of having a mature capability in Phase V will not happen. An attempt to bypass Phase IV by taking a code developed by algorithm researchers and placing it directly in the hands of design engineers, who may not understand the underlying theoretical models, algorithms, and possible numerical idiosyncrasies, usually results in a prolonged period of frustration and unreliability that leads to abandonment of the code.

Product Development engineers must be able to focus on engineering processes and have little time for manipulating the CFD ‘‘process’’ (i.e., codes must be very user oriented). Stable, packaged software solutions enable and promote consistent processes. These not only put CFD into the hands of the Product Development/Product Support engineers but also allow the ‘‘expert’’ user to get fast results with reduced variation. Integrated packaged software solutions combine various components to go from ‘‘lofts to plots’’ in the time scale consistent with a fast-paced engineering program. These packages include scripted packages for ‘‘standard’’ configurations, geometry and grid/paneling generation components, flow solvers, and postprocessing components for analyzing the results. These are all placed under some form of software version control to maintain consistency.

A key component of CFD and most engineering processes is geometry. CAD systems, such as CATIA, dominate most geometry engineering needs. However, these systems are designed for component design and definition and are not well suited to CFD use. A key component of many Boeing Commercial Airplanes CFD processes is AGPS––Aero Grid and Paneling System [6]. AGPS is a geometry software tool implemented as a programming language with an interactive graphical user interface. It can be dynamically configured to create a tailored geometry environment for specific tasks. AGPS is used to create, manipulate, interrogate, or visualize geometry of any type. Since its first release in 1983, AGPS has been applied with great success within The Boeing Company to a wide variety of engineering analysis tasks, such as CFD and structural analysis, in addition to other geometry-related tasks.

Computing resources consisting of high-end computing and graphics workstations must also be integrated. Seamless mass data storage must be available to store the vast amount of information that will be generated during the engineering application. These resources require dedicated computing system administration. The software control and computing system administration are necessary to free the engineers to focus their work on the engineering processes and not be consumed by the ‘‘computing’’ process.

Close customer involvement and acceptance is absolutely essential to deriving value from CFD. Customers are responsible for implementing the engineering process that will use CFD. They own the process, they determine what CFD, if any, they will depend on to carry out their assigned tasks. They are being graded on the engineering tasks they accomplish not on which CFD codes they use. Their use and trust of CFD is based on a long-term relationship between supplier and user. This relationship has engaged the customer early on in demonstrations of a new code or new application of an existing code. Validation is an on-going process, first of cases of interest to the customer, and then of the customer's ability to implement the new tool. Frequently, parallel applications are undertaken in which the customer continues with the existing tools while the supplier/developer duplicates the process with the new tool. This is especially the case when the new tool may enable the development of an entirely new process for executing the engineering task.

The long-term relationship with the customer is essential from another point of view. Until recently, project engineers, without exception, initially rejected every new CFD development that later became the primary CFD analysis and design tool in Boeing Commercial Airplanes Product Development and Product Support organizations. Every new or proposed CFD capability was initially viewed as too difficult to use, too costly to run, not able to produce timely results, not needed, and so on. ‘‘Just fix what we already have,’’ the customer would tell the developers. The customers had a point. Not until the new CFD technology had been integrated with the customer's preprocessing/postprocessing tools and computing system, validated to the customer's program, guaranteed of long-term support, and committed to continuous development and enhancement would the new technology be useful to them.

This made it difficult for the developers to propose new Phase I, II and III efforts. In particular, the initiation and continual defense of Phase I efforts demanded clear and unwavering vision. True vision invariably requires a fundamental understanding of both needs and means. As customers generally did not have the specialized algorithmic knowledge underlying CFD numerics, it was incumbent on the developers to acquire a thorough understanding of customer needs and concerns. The developers learned they could not just throw a new CFD tool over the fence and expect the customer to use it no matter how good it might be. The customer was interested in getting an engineering job done and not in the CFD tool itself! The process of thoroughly understanding customer issues took many years, and early Phase I, II, and III efforts were mostly ‘‘technology push’’ efforts, which had to be funded by NASA or other Government agencies. As these efforts progressed to Phase IV and V, and the developers established a track record for producing useful capabilities, the situation gradually changed.

Each success allowed the developers a little more leeway. Often they spotted ‘‘niche’’ needs that could be satisfied by the introduction of their new technology. It was felt that when the users were satisfied with the usability and utility of the technology in these areas they would then be willing to consider whether or not replacing their old tools in other areas might offer distinct advantages. Once the users accepted a new capability, they often became very innovative and applied the codes in unanticipated ways, perpetually keeping the developers and validation experts in an anxious state. Most of the new applications were, in fact, legitimate, and the developers had to run fast to understand the implications involved as well as to try and anticipate future application directions. As time went on, code developers, application experts, and project engineers began understanding each other's functions and issues, and a certain amount of trust developed. Gradually, CFD became a ‘‘pull’’ rather than ‘‘push’’ technology. This transformation was greatly facilitated by the rotation of top engineers between these functions.

Today in Boeing Commercial Airplanes, more than 20,000 CFD runs a year are made to support product development and the various existing product lines. More than 90% of these runs are done by production engineers outside the research group. The CFD methods in use provide timely results in hours or days, not weeks or months. Sufficient experience with the methods has given management confidence in their results. This means that solutions are believable without further comparison of known results with experiment, that the CFD methods contain enough of the right physics and resolve the important physical and geometric length scales, that the numerics of the method are accurate and reliable, and that the CFD tools are already in place––for there is no time to develop and validate new methods. Most of all, management is convinced that the use of CFD makes economic sense. A look at the history of CFD at Boeing Commercial Airplanes will show how we got to this level of use.

4. Chronology of CFD capability and use

CFD today covers a wide range of capabilities in terms of flow physics and geometric complexity. The most general mathematical description of the flow physics relevant to a commercial transport is provided by the Navier–Stokes equations. These equations state the laws of conservation of mass, momentum, and energy of a fluid in thermodynamic equilibrium. Unfortunately, direct solutions to these equations for practical aircraft configurations at typical flight conditions are well beyond the capabilities of today's computers. Such flows include chaotic, turbulent motions over a very wide range of length scales. Computations for the simulations of all scales of turbulence would require solving for on the order of 10¹⁸ degrees of freedom!
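A back-of-envelope check on that figure: a direct simulation resolving all turbulent scales needs on the order of Re^(9/4) grid points, and flight Reynolds numbers are of order 10^8. The scaling law is the standard one; the Reynolds number below is a typical assumed value, not a number from the paper:

Re = 1e8                 # typical flight Reynolds number (assumed)
dof = Re ** (9 / 4)      # classical estimate: resolved grid points ~ Re^(9/4)
print(f"{dof:.0e}")      # prints 1e+18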

Fortunately, solutions to simplified (and more tractable) forms of these equations are still of great engineering value. Turbulent flows may be simulated by the Reynolds equations, in which statistical averages are used to describe details of the turbulence. Closure requires the development of turbulence models, which tend to be adequate for the particular and rather restrictive classes of flow for which empirical correlations are available, but which may not be currently capable of reliably predicting behavior of the more complex flows that are generally of interest to the aerodynamicist. Use of turbulence models leads to various forms of what are called the Reynolds-averaged Navier–Stokes equations.

For many aerodynamic design applications, the flow equations are further simplified to make them more amenable to solution. Neglecting viscosity leads to the Euler equations for the conservation of mass, momentum, and energy of an inviscid fluid. Fortunately, under many flight conditions the effects of viscosity are small and can be ignored or simulated by the addition of the boundary layer equations, a much simplified form of the Reynolds-averaged Navier–Stokes equations.

The introduction of a velocity potential reduces the need to solve five nonlinear partial differential equations (that make up the Euler equations) to the solution of a single nonlinear partial differential equation known as the full potential equation. However, the potential approximation assumes an inviscid, irrotational, isentropic (constant entropy) flow. Potential solutions can adequately simulate shock waves as long as they are weak, which is the normal case for commercial transport configurations.

Further simplifications eliminate all the nonlinear terms in the potential equation, resulting in the Prandtl–Glauert equation for linear compressible flows, or the Laplace equation for incompressible flows. The use of these equations is formally justified when the vehicle is relatively slender or thin and produces only small disturbances from freestream flow.
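In symbols, the chain of simplifications reads as follows; this is the standard textbook statement, with $\Phi$ the full velocity potential, $\varphi$ the perturbation potential, $M_\infty$ the freestream Mach number, and $\gamma$ the ratio of specific heats (notation assumed, not taken from the paper):

\[
\nabla\cdot(\rho\,\nabla\Phi)=0,\qquad
\rho=\rho_\infty\left[1+\tfrac{\gamma-1}{2}M_\infty^2\left(1-\frac{|\nabla\Phi|^2}{U_\infty^2}\right)\right]^{1/(\gamma-1)}
\quad\text{(full potential)}
\]

\[
(1-M_\infty^2)\,\varphi_{xx}+\varphi_{yy}+\varphi_{zz}=0\quad\text{(Prandtl–Glauert)},\qquad
\varphi_{xx}+\varphi_{yy}+\varphi_{zz}=0\quad\text{(Laplace)}
\]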

In the following sections, we describe the CFD capability most heavily used at Boeing Commercial Airplanes in Seattle over the last 30 years. For the purposes of a rough chronological summary, we can say the following. Before 1973, the main codes employed by project engineers involved linearized supersonic flows with linearized representations of the geometry or else 2D incompressible flows. From 1973 to 1983, panel methods, which could model complex geometries in the presence of linear subsonic and supersonic flows, took center stage. The nonlinear potential flow/coupled boundary layer codes achieved their prime from 1983 to 1993. Their Euler counterparts came into use later in that timeframe. From 1993 to 2003, Reynolds averaged Navier–Stokes codes began to be used with increasing frequency. Clearly, much of the development and demonstration work leading to the widespread use of these codes occurred from five to 10 years earlier than these dates. It is important to note that a considerable length of time is often required for a code to achieve the Phase V level of maturity. It is also important to realize that once a code achieves this level of maturity and is in use and accepted by the user community, it tends to remain in use, even though improved capability at the Phase III or IV level may be available.

The Boeing panel code, A502, remains in some use today, even though its underlying technology was developed almost 30 years ago. The full potential code TRANAIR still receives widespread and heavy use.

4.1. Linear potential flow

4.1.1. First generation methods––early codes

The flow physics described by the early linear methods were greatly simplified compared to the ‘‘real’’ flow. Similarly, the geometric fidelity of the actual configuration also had to be greatly simplified for the computational analysis to fit within the speed and size constraints of the computers of that era. In spite of such seemingly hopeless limitations, these early CFD methods were successfully applied during the supersonic transport development programs of the late 1960s––the Anglo-French Concorde and the United States/Boeing SST. The need for computational help in the aerodynamic development of these aircraft stemmed from two factors. First, there was the relative lack of experience in designing supersonic cruise aircraft (the first supersonic flight had occurred only 15 years earlier). Second, there was the great sensitivity of supersonic wave drag to details of the aircraft design. Thus, the challenge of developing a viable low-drag design through empirical ‘‘cut and try’’ demanded whatever computational help was available. The opportunity to use simplified computational methods resulted because the design requirements for low supersonic wave drag led to thin, slender vehicles that minimized ‘‘perturbing’’ the airflow. These characteristics were consistent with the limitations of the linearized supersonic theory embedded in the early CFD codes. These codes included TA80 [7], a Supersonic Area Rule Code based on slender body theory; TA139/201 [8], a Mach Box Code based on linearized supersonic theory; and TA176/217 [9], a Wing-Body Code based on linear potential flow theory with linearized geometry representations. These codes ran on IBM7094 machines. The good agreement with test data predicted by these linear theory methods for a drag polar of the Boeing SST model 733-290 is shown in Fig. 6. This was a linear theory optimized design of the configuration that allowed Boeing to win the SST design development Government contract. The resulting supersonic transport designs ended up looking as they did, in part, because the early CFD codes could not handle more geometrically complex configurations.

The linear aerodynamics of the Wing-Body Code was later combined with linear structural and dynamic analysis methods in the FLEXSTAB [10] system for the evaluation of static and dynamic stability, trim state, inertial and aerodynamic loading, and elastic deformations of aircraft configurations at supersonic and subsonic speeds. This system was composed of a group of 14 individual computer programs that could be linked by tape or disk data transfer. The system was designed to operate on CDC-6000 and -7000 series computers and on the IBM 360/370 computers. A very successful early application of FLEXSTAB was the aeroelastic analysis of the Lockheed YF-12A as part of the NASA Flight Loads program. Thirty-two flight test conditions ranging from Mach 0.80 to 3.0 and involving hot or cold structures and different fuel loading conditions were analyzed at several load factors [11].

4.1.2. First generation methods––TA230

By 1973, 3D subsonic panel methods were beginning to affect the design and analysis of aircraft configurations at Boeing. Subsonic panel methods had their origins with the introduction of the Douglas Neumann program in 1962 [12]. This program was spectacularly successful for its time in solving the 3D incompressible linear potential flow (Laplace) equation about complex configurations using solid wall (Neumann) boundary conditions. The numerical method represented the boundary by constant strength source panels with the strengths determined by an influence coefficient equation set relating the velocities induced by the source panels to the boundary conditions. The lack of provision for doublet panels limited the class of solutions to those without potential jumps and hence without lift. One of the first computer programs for attacking arbitrary potential flow problems with Neumann boundary conditions [13,14] combined the source panel scheme of the Douglas Neumann program with variations of the vortex lattice technique [15]. This program became known as the Boeing TA230 program. A very useful feature of this program was the ability to handle, in a logical fashion, any well-posed Neumann boundary value problem. From its inception, the method employed a building block approach wherein the influence coefficient equation set for a complex problem was constructed by simply assembling networks appropriate to the boundary value problem. A network was viewed as a paneled surface segment on which a source or doublet distribution was defined, accompanied by a properly posed set of Neumann boundary conditions. The surface segment could be oriented arbitrarily in space and the boundary conditions could be exact or linearized. Several doublet network types with differing singularity degrees of freedom were available to simulate a variety of physical phenomena producing discontinuities in potential. Compressibility effects were handled through scaling. These features combined to allow the analysis of configurations having thin or thick wings, bodies, nacelles, empennage, flaps, wakes, efflux tubes, barriers, free surfaces, interior ducts, fans, and so on.
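The numerical core described here, constant-strength source panels plus an influence-coefficient system enforcing the solid-wall condition, fits in a few lines for a 2D nonlifting shape. A sketch of that scheme for a circular cylinder, assuming NumPy; this is a textbook reduction, far short of TA230's generality, and lumping each panel's source at its midpoint is a further simplification:

import numpy as np

N = 60                                               # number of source panels
theta = np.linspace(0.0, 2.0 * np.pi, N + 1)
xe, ye = np.cos(theta), np.sin(theta)                # panel endpoints on a unit circle
xm, ym = (xe[:-1] + xe[1:]) / 2, (ye[:-1] + ye[1:]) / 2   # control points
sx, sy = xe[1:] - xe[:-1], ye[1:] - ye[:-1]
lengths = np.hypot(sx, sy)
nx, ny = sy / lengths, -sx / lengths                 # outward unit normals

# Influence coefficients: normal velocity at control point i per unit source
# strength on panel j (sources lumped at midpoints; 1/2 is the self-induced term).
A = np.zeros((N, N))
for i in range(N):
    for j in range(N):
        if i == j:
            A[i, j] = 0.5
        else:
            dx, dy = xm[i] - xm[j], ym[i] - ym[j]
            r2 = dx * dx + dy * dy
            A[i, j] = (dx * nx[i] + dy * ny[i]) * lengths[j] / (2.0 * np.pi * r2)

U_inf = 1.0
rhs = -U_inf * nx                                    # cancel the freestream's normal flow
sigma = np.linalg.solve(A, rhs)                      # panel source strengths
print("net source strength (should be ~0 for a closed body):", sigma @ lengths)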

By 1973, Boeing had acquired a CDC 6600 for scientific computing, which allowed the TA230 program to solve problems involving hundreds of panels. This was sufficient to model full configurations with the fidelity necessary to understand component interactions.

One of the most impressive early uses of the TA230 code was in the initial design phase of the B747 Space Shuttle Carrier Aircraft (SCA). The purpose of the initial design phase was to define the modifications needed to accomplish the following missions: to ferry the Space Shuttle Orbiter; to air-launch the Orbiter; and to ferry the external fuel tank. To keep the cost of the program to a minimum, CFD was extensively used to investigate the Orbiter attitude during the ferry mission, the Orbiter trajectory and attitude during the launch test, and the external tank location and attitude during the ferry mission. At the conclusion of the design phase, the final configurations selected were tested in the wind tunnel to verify predictions. A typical example of a paneling scheme of the B747 with the Space Shuttle Orbiter is depicted in Fig. 7. In this example, the Orbiter incidence angle was 8 deg with respect to the B747 reference plane. The predicted lift coefficient, CL, as a function of wing angle of attack for this configuration is shown in Fig. 8. The agreement between the analyses and wind tunnel data shown in this figure is excellent.

TA230 was used with TA378 [16], a 3D Vortex Lattice Method with design/optimization capability, to develop winglets for a KC-135 aircraft. Wind tunnel tests confirmed a 7–8% drag reduction in airplane drag due to the installation of these winglets [17].

Another early CFD success was the improvement of the understanding of the interference drag of a pylon-mounted engine nacelle under the wing. The existence of unwanted interference drag had been revealed by wind tunnel testing, but the physical mechanism of the interference was still unknown. To avoid the interference drag, it is common practice to move the engine away from the wing. The resulting additional weight and drag due to the longer engine strut must be balanced against the potential interference drag if the engine is moved closer to the wing. CFD studies with TA230 along with specialized wind tunnel testing in the mid-1970s, provided the necessary insight into the flow mechanism responsible for the interference. This understanding led to the development of design guidelines that allowed closer coupling of the nacelle to the wing [18]. The Boeing 757, 767, 777, 737-300/400/500 series, Next Generation 737/600/700/800/900 series, and the KC-135R are all examples of aircraft where very closely coupled nacelle installations were achieved without incurring a significant drag penalty.

4.1.3. Second generation linear potential flow method––PANAIR/A502

The success of the TA230 code in modeling complete vehicle configurations and component interactions created a strong demand among Boeing aerodynamicists for CFD analyses and was undoubtedly the key factor that initiated the paradigm shift toward acceptance of CFD as an equal partner to the wind tunnel and flight test in the analysis and design of commercial aircraft. However, the paradigm shift was slowed by the fact that the code had to be run by experts possessing specialized knowledge, some of which was totally unrelated to aerodynamics. In fact, it often took weeks requiring the expertise of an engineer having months or years of experience with the method to set up and run a complex configuration. To some extent this was unavoidable; to correctly model a complex flow for which no previous user experience was available, the engineer had to understand the mathematical properties and limitations of potential flow. Nevertheless, once the boundary value problem was formulated, the user still had to contend with certain numerical idiosyncrasies and inefficiencies that required adherence to stringent paneling rules, frequently incompatible with the complex geometrical contours and rapidly changing aerodynamic length scales of the vehicle under analysis. Such difficulties were directly related to the use of flat panels with constant source and doublet strengths. Methods employing these features were quite sensitive to panel layout. Numerical problems arose when panel shapes and sizes varied, and fine paneling in regions of rapid flow variations often forced fine paneling elsewhere. In addition, excessive numbers of panels were often required since numerical accuracy was strongly affected by local curvature and singularity strength gradient. These problems placed severe limitations on the development of automatic panelers and other complementary aids aimed at relieving the user of the large amount of handwork and judgments associated with producing accurate numerical solutions.

Consequently, a method was developed under contract to NASA to enhance practical usability by improving upon the flat, constant singularity strength panels employed in the construction of networks [19]. This method featured the use of curved panels and higher order distributions of singularities. Source and doublet strengths were defined by least square fits of linear and quadratic splines to discrete values located at specific points on the networks. Higher order influence coefficients were obtained using recursion relations with the standard low order coefficients as initial conditions. Boundary conditions were enforced at the same or other discrete locations depending on their type. Virtually any boundary condition that made sense mathematically was provided for. In particular, the incorporation of Dirichlet boundary conditions not only offered the opportunity to design surface segments to achieve desired pressure distributions, but also clarified the nature of the boundary value problem associated with modeling viscous wakes and propulsion effects. Robin boundary conditions provided for the modeling of slotted walls, which allowed for direct comparisons of CFD results with wind tunnel data. These features were incorporated in the NASA code known as PANAIR and the Boeing code known as A502. The latter code was generalized to treat supersonic flows [20], free vortex flows [21], and time harmonic flows [22]. In the supersonic case, upwinding was achieved by forward weighting the least square singularity spline fits.

The numerics incorporated into A502 solved a number of usability issues. Fig. 9 clearly demonstrates the relative insensitivity and stability of computed results to paneling. This insensitivity encouraged project users to apply the code and trust results. In addition, the boundary condition flexibility allowed users to experiment with various types of modeling, leading to a wide variety of applications never entirely envisioned by the developers.

The versatility of A502 paid off when a ‘‘surprise'' was encountered during the precertification flight testing of the then new 737-300. The aircraft was not demonstrating the preflight wind tunnel based prediction of take-off lift/drag ratio. A fix was needed quickly to meet certification and delivery schedules. Specialized flight testing was undertaken to find the cause and to fix the performance shortfall. A CFD study was immediately undertaken to enhance understanding and provide guidance to the flight program. Eighteen complete configuration analyses were carried out over a period of three months. These included different flap settings, wind tunnel and flight wing twist, flow-through and powered nacelle simulations, free air and wind tunnel walls, ground effect, seal and slotted flaps, and other geometric variations [23]. These solutions explained and clarified the limitations of previous low-speed wind tunnel test techniques and provided guidance in recovering the performance shortfall through ‘‘tuning'' of the flap settings during the flight testing. The aircraft was certified and delivered on schedule. A comparison of the computed L/D predictions with flight is shown in Fig. 10.

A502 studies have been used to support other flight programs on a time-critical basis. In particular, the code was used to support engine/airframe installation studies in the early 1980s [24], to evaluate wind tunnel tare and interference effects, and to provide Mach blockage corrections for testing large models. In addition, the code was used for the design of the wingtip pod for the Navy E6-A, a version of the Boeing 707. No wind tunnel testing was done before flight. The FAA has accepted A502 analysis for certification of certain aircraft features that were shown to have minimal change from previously accepted standards. Finally, A502 was used to develop a skin waviness criterion and measurement technique that led to the virtual elimination of failed altimeter split testing during the first flight of every B747-400 aircraft coming off the production line. Initially, one of every three aircraft was failing this test, requiring several days of downtime to fix the problem. The A502-based procedure could identify excessive skin waviness before first flight and led to manufacturing improvements that eliminated the root cause of the problem.

A502 is still used today to provide quick estimates for preliminary design studies. A relatively new feature of the code takes advantage of available linear sensitivities to predict a large number of perturbations to stability and control characteristics and stability derivatives, including control surface sensitivities. Virtual control surface deflections and rotary dynamic derivatives are modeled through surface panel transpiration. Stability derivatives, such as the lift curve slope or directional stability, are calculated automatically. A typical application may involve 20 subcases submitted in a single run, with solutions available in an hour or so. Within the limitations of the code, all major stability and control derivatives can be generated in a single run (at a single Mach). The method is typically used to calculate increments between similar configurations. The code was recently used to calculate stability and control increments between a known baseline and a new configuration. A total of 2400 characteristics were computed for eight configurations by one engineer in a two-day period!
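
The economics behind such runs follow from linearity: the influence coefficient matrix is assembled and factored once, and each subcase, whether an angle-of-attack perturbation or a virtual control deflection imposed through transpiration, only changes the right-hand side. A schematic sketch, with random arrays standing in for the real panel quantities:

import numpy as np
from scipy.linalg import lu_factor, lu_solve

rng = np.random.default_rng(0)
n = 200                                           # number of panels (toy size)
aic = rng.normal(size=(n, n)) + n * np.eye(n)     # stand-in influence matrix
lu = lu_factor(aic)                               # factor once per Mach number

rhs_base = rng.normal(size=n)      # onset-flow normal wash (stand-in)
rhs_dalpha = rng.normal(size=n)    # RHS change per radian of angle of attack
rhs_flap = rng.normal(size=n)      # transpiration for a unit flap deflection

# Each subcase is just another back-substitution
sol_base = lu_solve(lu, rhs_base)
sol_alpha = lu_solve(lu, rhs_base + 0.01 * rhs_dalpha)
sol_flap = lu_solve(lu, rhs_base + rhs_flap)

cl = lambda sigma: sigma.mean()    # placeholder force integration
print("dCL/dalpha ~", (cl(sol_alpha) - cl(sol_base)) / 0.01)
print("dCL/dflap  ~", cl(sol_flap) - cl(sol_base))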

4.2. Full potential/coupled boundary layer methods

4.2.1. A488/A411

Since Murman and Cole [25] introduced a numerical solution method for the transonic small disturbance equation in the early 1970s, computational fluid dynamics method development for nonlinear flows has progressed rapidly. Jameson and Caughey [26] formulated a fully conservative, rotated finite volume scheme to solve the full potential equation––the well-known FLO27/28 codes. The Boeing Company acquired the codes and invested a significant amount of effort to advance the capability from Phase II to Phase V. Convergence reliability and solution accuracy were enhanced. To allow transonic analyses over complex transport configurations, a numerical grid generation method based on Thompson's elliptic grid generation approach [27] was developed [28] and tested extensively for wing or nacelle alone, wing-body, and wing-body-strut-nacelle configurations. The potential flow solvers FLO27/28, coupled with the 3D finite difference boundary layer code A411 [29] and the 3D grid generation code, formed the major elements of the Boeing transonic flow analysis system, A488––the most heavily used analysis code at Boeing from the late 1970s to the early 1990s. The production version of the A488 system, illustrated in Fig. 11, included a number of preprocessing and postprocessing programs that could handle the complete analysis process automatically for specific configuration topologies––a truly usable code for design engineers. This integrated package combined the various software components to go from ‘‘lofts to plots'' on a time scale consistent with a fast-paced engineering program––overnight!

Fig. 12 shows a comparison of A488 results obtained by project engineers with wing pressure distributions measured in flight on a 737-300. The computational model consisted of the wing, body, strut, and nacelle. The wing definition included the estimated aeroelastic twist for the condition flown. Although the character of the pressure distribution on the wing changes dramatically across the span, the computational results agree reasonably well with the measured data.

The Boeing Propulsion organization also employed a full potential/coupled boundary layer code called P582. It was developed at Boeing and used a rectangular grid [30] and multigrid acceleration scheme [31]. P582 was used extensively for engine inlet simulation and design in the late 1970s and 1980s and is still used in the Propulsion organization for various nacelle inlet simulations.

4.2.2. TRANAIR

By 1983, complex configurations were routinely being analyzed by project engineers using panel methods. Surface geometry generation tools were maturing, and users took for granted the ability to add, move, or delete components at will; readily change boundary condition types; and obtain numerically accurate solutions at reasonable cost in a day or two. On the other hand, the nonlinear potential flow codes required expert users and considerable flow time to obtain converged and accurate results on new and nonstandard configurations. Often, geometrical simplifications had to be made, jeopardizing the validity of conclusions regarding component interactions. Clearly, the nonlinear nature of the flow was responsible for numerous difficulties. The development of shocks in the flowfield prolonged convergence, especially if the shocks were strong and prematurely set in the wrong location. Moreover, weak and double shocks were often not captured accurately, if at all. Boundary layer coupling contributed problems as well, especially as separation was approached. Often, the boundary layer displacement effect had to be frozen after a certain number of iterations, leading to questionable results. Experts became very good at circumventing many of these problems; however, the one problem that could not readily be overcome was the necessity of generating a volume grid to capture nonlinear effects.

Even today, volume grid generation is one of the main barriers to routine use of nonlinear codes. Often the creation of a suitable grid about a new complex configuration can take weeks, if not months. In the early 1980s, the situation was far worse, and suitable grids were readily available only for standard and relatively simple configurations. Because of the enormous promise demonstrated by existing nonlinear methods, the panel method developers at Boeing were awarded a contract from NASA to investigate alternatives to surface fitted grid generation. In the next few paragraphs, we describe some of the technical issues that arose during this contract. They are of interest to this paper in that they followed directly from a ‘‘needs and usability’’ starting point rather than the usual ‘‘technology discovery’’ starting point. To a large extent, this has characterized the CFD development efforts at Boeing.

The developers started with a rather naïve approach, i.e., take an A502 paneling, with which the project users were already familiar, and embed it in a uniform rectangular grid to capture nonlinear effects (Fig. 13). This approach logically led to a sequence of subproblems that had to be addressed in turn [32]. First, one could hardly afford to extend a uniform grid into the far field to ensure proper far field influence. However, if the flow was assumed to be linear outside a compact region enclosing the configuration, one could use linear methods to obtain the far field influence. A discrete Green's function for the Prandtl–Glauert equation was constructed, which incorporated the effect of downstream sources and sinks resulting from wakes. This Green's function was applied using FFTs and the doubling algorithm of Hockney [33], a standard technique in astrophysics. The net effect was the same as if the uniform grid extended all the way to infinity, the only approximation being the assumption of linearity outside a compact box. As a byproduct of this solution, the user no longer had to estimate a suitable far field stretching ratio.
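
The doubling trick is easy to demonstrate in one dimension for a free-space Poisson problem: zero-pad the source onto a doubled grid, sample the Green's function at wrapped distances, and the periodic FFT convolution then reproduces the free-space sum exactly. A minimal sketch, with the 1D kernel G(r) = |r|/2 standing in for the Prandtl–Glauert Green's function:

import numpy as np

n, L = 64, 1.0
dx = L / n
x = (np.arange(n) + 0.5) * dx
rho = np.exp(-((x - 0.5) / 0.05) ** 2)     # compact source distribution

# Direct free-space sum: phi(xi) = sum_j G(xi - xj) * rho_j * dx
phi_direct = np.array([np.sum(0.5 * np.abs(xi - x) * rho) * dx for xi in x])

# Hockney doubling: zero-pad to 2n; sample G at the wrapped distance so that
# periodic convolution on the doubled grid equals the free-space convolution
m = 2 * n
rho_pad = np.zeros(m)
rho_pad[:n] = rho
r_wrap = np.minimum(np.arange(m), m - np.arange(m)) * dx
G = 0.5 * r_wrap
phi_fft = np.fft.irfft(np.fft.rfft(G) * np.fft.rfft(rho_pad), m)[:n] * dx

assert np.allclose(phi_direct, phi_fft)    # identical up to round-off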

The next problem that had to be addressed was how to handle the intersections of the grid with the paneling and how to apply boundary conditions. The developers decided to use a finite element approach based on the Bateman variational principle [34]. Upwinding was achieved by factoring the density at the centroid of the elements out of the stiffness integrals and then biasing it in an upwind direction. The elements intersecting the paneled boundary were assumed to have linear basis functions regardless of their shapes. Stiffness matrix integrals were then evaluated over the subset of the elements exposed to the flowfield. The integration was performed recursively using volume and then surface integration by parts. Additional surface integrals were added to impose the same variety of boundary conditions as available in A502.
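
The density biasing can be sketched in one dimension: wherever the local flow is supersonic, shift the density used in the flux toward its upstream value. The switching function and bias form below are illustrative, in the spirit of artificial-density schemes, not the exact TRANAIR formulation.

import numpy as np

def biased_density(rho, mach, mach_crit=0.95):
    # Upwind the density where the flow is locally supersonic:
    # rho_tilde_i = rho_i - mu_i * (rho_i - rho_{i-1}),  mu_i >= 0
    mu = np.maximum(0.0, 1.0 - (mach_crit / np.maximum(mach, 1e-12)) ** 2)
    rho_tilde = rho.copy()
    rho_tilde[1:] -= mu[1:] * (rho[1:] - rho[:-1])
    return rho_tilde

rho = np.linspace(1.0, 0.5, 11)    # density falling through a transonic zone
mach = np.linspace(0.8, 1.4, 11)   # local Mach number along the flow direction
print(biased_density(rho, mach))   # biased only where mach > mach_crit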

The main problem with a uniform rectangular grid is its inability to capture the local length scales of the geometry and flow. Consequently, grid refinement was an absolutely necessary feature of the approach. However, it was felt that solution adaptive grid refinement was necessary in any event to ensure accuracy, especially if the code was to be used by project engineers without the aid of the developers. The refinement mechanism was relatively straightforward: just divide each rectangular grid box into eight similar boxes (Fig. 14) and keep track of the refinement hierarchy using an efficient oct-tree data structure.
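
A minimal sketch of the box refinement bookkeeping, with a user-supplied error indicator deciding which leaf boxes to subdivide; the class layout is illustrative, not TRANAIR's actual oct-tree:

import numpy as np

class Box:
    def __init__(self, lo, hi, level=0):
        self.lo, self.hi = np.asarray(lo, float), np.asarray(hi, float)
        self.level = level
        self.children = []                 # empty for leaf boxes

    def refine(self):
        # Split into eight similar boxes by halving each coordinate direction
        mid = 0.5 * (self.lo + self.hi)
        for ix in range(2):
            for iy in range(2):
                for iz in range(2):
                    lo = np.where([ix, iy, iz], mid, self.lo)
                    hi = np.where([ix, iy, iz], self.hi, mid)
                    self.children.append(Box(lo, hi, self.level + 1))

def adapt(box, error_indicator, max_level=4):
    if box.children:
        for c in box.children:
            adapt(c, error_indicator, max_level)
    elif box.level < max_level and error_indicator(box):
        box.refine()
        for c in box.children:
            adapt(c, error_indicator, max_level)

def count_leaves(b):
    return 1 if not b.children else sum(count_leaves(c) for c in b.children)

# Toy indicator: refine every leaf that straddles a "shock" at x = 0.5
root = Box([0, 0, 0], [1, 1, 1])
adapt(root, lambda b: b.lo[0] <= 0.5 <= b.hi[0])
print(count_leaves(root), "leaf boxes")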

Development of a suitable error indicator was another matter, however. Mathematical theory certainly offered guidance here, but a surprising amount of engineering knowledge had to be injected into the process. A typical ‘‘gotcha'' with a purely mathematical approach was the tendency of the refinement algorithm to capture the precise details of a wing tip vortex all the way from the trailing edge to the end of the wind tunnel diffuser.

The existence of refined grids complicated the design of a solution algorithm. Multigrid methods were somewhat of a natural fit here, but the developers were partial to direct solvers, as these had turned out to be so flexible for the panel codes, especially when it came to implementing unusual boundary conditions and coupling boundary layer equations and unknowns. They adopted a damped Newton method, with the Jacobian systems solved using a preconditioned GMRES iterative algorithm. A sparse direct solver was used as the preconditioner. Even with nested dissection ordering, the cost and storage for a complete factorization were prohibitive; hence, they settled on an incomplete factorization employing a dynamic drop tolerance approach, whereby small fill-in elements were dropped as they were formed. The method was surprisingly efficient and robust. As a rule, decomposition of the Jacobian resulted in fill-in factors of less than two and constituted less than 10% of the total run cost, even for grids having more than a million nodes.
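
The solver strategy maps directly onto standard sparse tools: a damped Newton iteration whose linear systems are solved by GMRES, preconditioned by an incomplete LU factorization with a drop tolerance. The model problem below, a 1D nonlinear reaction-diffusion equation, is purely illustrative.

import numpy as np
import scipy.sparse as sp
from scipy.sparse.linalg import spilu, gmres, LinearOperator

n = 400
A = sp.diags([-1, 2, -1], [-1, 0, 1], shape=(n, n), format="csc") * n**2
b = np.full(n, 10.0)

def residual(u):
    return A @ u + u**3 - b

def jacobian(u):
    return (A + sp.diags(3.0 * u**2)).tocsc()

u = np.zeros(n)
for it in range(20):
    F = residual(u)
    if np.linalg.norm(F) < 1e-10:
        break
    J = jacobian(u)
    # Incomplete factorization with a drop tolerance as the preconditioner
    ilu = spilu(J, drop_tol=1e-5, fill_factor=2.0)
    M = LinearOperator(J.shape, ilu.solve)
    du, info = gmres(J, -F, M=M, atol=1e-12)
    # Damping: halve the step until the residual actually decreases
    step = 1.0
    while np.linalg.norm(residual(u + step * du)) >= np.linalg.norm(F) and step > 1e-4:
        step *= 0.5
    u += step * du
print("converged in", it, "Newton steps")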

Early versions of TRANAIR used the A411 boundary layer code in an indirectly coupled mode in much the same manner as A488. However, the desired convergence reliability was never achieved, and the shock boundary layer interaction model was occasionally suspect. About this time, Drela [35] developed an exceedingly accurate 2D integral boundary layer that he directly coupled with his 2D Euler solver. With Drela's help, the TRANAIR development team modified this boundary layer to incorporate sweep and taper effects and integrated it into the code. In this connection, the use of a direct solver was invaluable. The resultant code turned out to be very accurate for transport configurations and agreement with experiment was considered by project users to be quite remarkable.

As TRANAIR received increasing use, a number of enhancements were added. To model powered effects, regions of non-freestream but constant total temperature and pressure were simulated along with appropriate shear layer effects [36]. Far field drag calculations were added, which later led to the ability to perform aerodynamic optimization. Time harmonic capability was created for stability and control calculations. Aeroelastic effects were simulated by adding structural unknowns and equations to the system [37]. Here again the use of a sparse solver was invaluable.

Without question, the development of the TRANAIR code strongly benefited from the work and experiences of CFD pioneers such as Murman [25], Jameson [26], Hafez [38], Cebeci [39], McLean [29], Drela [35], and others. Nevertheless, about 10 major and 30 minor algorithms had to be developed or adapted. A few were quite far from the mainstream CFD efforts of the time and required considerable effort. It took almost five years of research and development before a truly useful result could be produced (1989). The TRANAIR code ultimately evolved into the Boeing workhorse aerodynamic code of the 1990s and up to the current time for analyzing flows about complex configurations. TRANAIR was heavily used in the design of the 777, the 737NG, and all subsequent modifications and derivatives to the Boeing Commercial Airplanes fleet. Since 1989, it has been run to completion more than 70,000 times on an enormously wide variety of configurations, some of which were not even vehicles. It has had about 90 users in Boeing. An older version of the code was used by NASA, the Air Force, the Navy, and General Aviation. In 2002, TRANAIR was run to completion at Boeing more than 15,000 times, which is considerable use for a complex geometry CFD code. If we had to choose one single technical feature of TRANAIR that was responsible for such widespread use, we would choose solution adaptive grid refinement. In retrospect, while this feature was intended to improve accuracy, its main benefit was to greatly relieve the user of the burdensome and labor-intensive task of generating a volume grid.

Even with substantially simplified gridding requirements, preparing the inputs for a general geometry CFD code and processing the outputs are still formidable tasks. An essential enabler for TRANAIR has been the development of a packaged process for inputting ‘‘standard'' configurations. By ‘‘standard,'' we mean those configuration types that have been scripted in the various components that make up the process. Configurations not included in the ‘‘standard'' set can still be analyzed but will not benefit from the same degree of automation. This package, illustrated in Fig. 15, is compatible with, and takes advantage of, common Boeing Commercial Airplanes processes for geometry and postprocessing. At the center of this process is the TRANAIR flow solver. AGPS scripts have been developed to automate the paneling of ‘‘standard'' configurations from AGPS lofts. AGPS scripts have also been developed to generate the input deck for the TRANAIR solver. These inputs define the flight conditions, the solution adaptive gridding strategy, and the boundary layer inputs for ‘‘standard'' configurations. A UNIX script is available to generate the various job control files to execute the solver on several types of computers. The TRANAIR solver generates several files for restarts of the solver and output processor, output files for various aerodynamic parameters, and a file for flowfield parameters. A special-purpose code, compatible with the unique TRANAIR grid structure, is available to view the flowfield properties. The package enables setting up and submitting a ‘‘standard'' configuration for solution from AGPS lofts in one or two hours. Complete solutions from ‘‘lofts to plots'' are frequently available in less than 12 h. ‘‘Standard'' configurations include, for example, four-engine 747-like transports with underwing struts and nacelles, vertical and horizontal stabilizers, and boundary layer modeling on both wing and body.

During the aerodynamic design of the Boeing 777 in the early 1990s, the risk of significant interference drag due to the exhaust from the large engines was revealed through TRANAIR analysis. Neither the earlier linear-based CFD methods nor conventional wind tunnel testing techniques, which did not simulate the exhaust, would have detected this potential problem. Only a very expensive powered-nacelle testing technique could assess these interference effects. Three different manufacturers' engines were being considered for the new aircraft. Using the powered testing technique to develop the engine installations would have added considerable expense. Moreover, such a wind tunnel based development would have had an unacceptable design flow time. Nonlinear transonic TRANAIR analysis by the product development engineers made it practical to address these installation problems, including the effects of the engine exhaust flows, in a timely manner. Had these problems gone undetected until late in the aircraft's development, when the powered testing is usually done, any fixes would have been extremely expensive to implement.

Fig. 16 shows a comparison of TRANAIR results with test data from a similar configuration. TRANAIR's ability to provide insight into design changes allowed a close ‘‘Working Together'' relationship between the various Boeing engineering disciplines and the engine manufacturers. It is noteworthy that the exhaust system of all three engine models is very similar in design, a feature found only on the 777. Key to the success of this application was the ability to model enough of the relevant physics and to provide solutions quickly enough to support the development schedule. The effect of CFD on the project was to provide information facilitating a closer working relationship between design groups. This enabled detecting problems early in the development process, when fixing or avoiding them was least expensive.

TRANAIR continues to see extensive use as the primary tool for transonic aerodynamic evaluation and design of commercial aircraft configurations. It is well suited for analysis in the attached and mildly separated flow portion of the flight envelope. For conditions with strong viscous interactions, one must resort to using the Navier–Stokes equations.

4.2.3. BLWF

The BLWF code was developed by researchers at the Central Aerohydrodynamic Institute (TsAGI) and enhanced under contract with the Boeing Technology Research Center in Moscow, CIS [40]. It saw its first use at Boeing in 1994. The BLWF technology was very similar to that of the A488 system that had been developed internally at Boeing. However, it differed from A488 in that it had been designed and tuned for workstations and, later, PC computing systems, instead of the large vector supercomputers that had been the main computational modeling tool within Boeing Commercial Airplanes. The tool was very responsive, providing solutions within minutes rather than hours. The rapidity of response, along with the significant cost-of-use reduction from hosting on less expensive hardware systems, changed the nature of use of the modeling tool. New applications, such as Reynolds number corrections for wing loads, have become feasible with such a tool. This application requires solutions for about a dozen Mach numbers over a range of angles of attack (five to ten). Use of BLWF allows a database of hundreds of solutions to be generated in a matter of a few hours, rather than days or weeks. The code has also been used extensively in the preliminary design stage of aircraft definition. At this point in the airplane development cycle, there are typically a large number of significant changes in the aircraft definition, along with a need to understand the behavior of the configuration over a large range of conditions. BLWF allows more realistic modeling of the flight characteristics than other Preliminary Design methods and also provides an ability to obtain the information rapidly, allowing more effective cycling of the preliminary design through the evolution of an aircraft.

4.3. Euler/coupled boundary layer methods

The use of full potential/coupled boundary layer codes reaches its limit in predicting airplane performance at off-design conditions, where significant shock-induced flow separations, or vortex flows generated from sharp edges of the configuration, occur in the flowfield. The boundary layer approximation breaks down, and the irrotational/isentropic flow assumption is not a good approximation for such flow conditions. Moreover, wake locations must be estimated a priori, preventing the accurate analysis of flows where vortex interactions are an important feature.

Algorithm research in the early 1980s focused on solution of the Euler equations––the governing equations for inviscid fluid flows. The Boeing version of an Euler/boundary layer coupling code, A588, is based on FLO57 [41] coupled with the same boundary layer code A411 used in A488. The code also introduced a capability for simulating engine inlet and exhaust flows with various total pressures and total temperatures, as well as propfan engine power effects through the use of an actuator disk concept. A588 was the main analysis tool for isolated nacelle development studies until very recently. It provided accurate predictions of nacelle fan cowl pressure distributions, as well as fan cowl drag rise. The multiblock 3D Euler code was used extensively for the simulation of the propfan engine on the Boeing 7J7 program during the mid-1980s, as shown in Fig. 17. A key application was the evaluation of propfan engine installation effects on tail stability characteristics––including simulations that could not be accomplished in the wind tunnel.

Another Euler/integral boundary layer coupling code, A585, based on the work of Drela and Giles [42], was developed in the mid-1980s for 2D airfoil analysis and design. This code has been used extensively for advanced airfoil technology development, an essential capability for airplane product development engineers.

4.4. Navier–Stokes methods

The limitation of full potential or Euler/boundary layer coupling codes to flow regimes without significant flow separation led to the development and application of solution methods for the Navier–Stokes equations, which are valid over the whole flight regime of most commercial airplanes. Finite difference schemes [43] and finite volume schemes with either artificial numerical dissipation [44] or Roe's upwind scheme [45] were developed and tested extensively during the late 1980s and early 1990s. At the same time, development of turbulence models for attached and separated flow simulations progressed rapidly. The simple zero-equation Baldwin/Lomax model [46] was used extensively during the early stage of Navier–Stokes code applications. Later on, the Baldwin/Barth one-equation model [47] and the Spalart/Allmaras one-equation model [48], together with Menter's shear-stress transport k–ω model [49], became available and were used for a wide range of flight conditions, including massively separated flows.

4.4.1. Structured grid codes––Zeus TLNS3D/CFL3D, OVERFLOW

Navier–Stokes technology using structured grids was well developed by the early 1990s and is available to the industry. However, most existing structured grid Navier–Stokes codes require the user to provide high-quality 3D grids to resolve detailed viscous flows near configuration surfaces and viscous wake regions. The task of grid generation––both surface grid and field grid––has become one of the essential elements, as well as the bottleneck, in using Navier–Stokes technology for complex configuration/complex flow analysis. In addition, most Navier–Stokes solvers have not been thoroughly checked out and validated for numerical accuracy, convergence reliability, and application limitations. Boeing has acquired several Navier–Stokes codes from NASA, as well as from other research organizations, and has devoted a great deal of effort to testing the codes and validating numerical results against available wind tunnel and flight data. In addition, to make the codes usable tools for engineering design, Boeing CFD developers have rewritten a 3D grid generation code based on an advancing front approach [50], so that precise control of grid quality, such as grid spacing, stretching ratio, and grid orthogonality near configuration surfaces, can be achieved. This is an important requirement for accurate resolution of viscous flow regions for all existing Navier–Stokes solvers. Two structured grid generation approaches are currently in use: the matched/patched multiblock grid approach and the overset or overlap grid approach. The former subdivides the flowfield into a number of topologically simple regions, such that a high-quality grid can be generated in each region. This is a rather time-consuming and tedious process for complex configuration analysis. However, once this ‘‘blocking'' process is done for one configuration, a similar configuration can be handled easily through the use of script or command files. The TLNS3D/CFL3D-based Zeus Navier–Stokes analysis system [51], developed and used at Boeing for Loads and Stability and Control applications, belongs to this structured, multiblock grid approach. The Zeus analysis system inherited the process developed in the A488 system, which packaged many user-friendly preprocessing programs that handled geometry and flow condition input, as well as postprocessing programs that printed and plotted wing sectional data and airplane force and moment data. This has allowed design engineers to reduce their input to just geometry lofts and flight conditions and obtain solutions within a few hours or overnight, depending on the size of the problem and the availability of computing resources. The Zeus system is illustrated in Fig. 18.
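
The kind of grid quality control mentioned above can be made concrete with a simple wall-normal spacing check: grow the viscous grid geometrically from a prescribed first cell height and verify that the stretching ratio stays within bounds. The numbers below are illustrative, not Boeing criteria.

import numpy as np

def wall_normal_spacing(first_cell, ratio, n_cells):
    # Geometric growth: cell k has height first_cell * ratio**k
    h = first_cell * ratio ** np.arange(n_cells)
    return np.concatenate([[0.0], np.cumsum(h)])

y = wall_normal_spacing(first_cell=1e-6, ratio=1.15, n_cells=40)
ratios = np.diff(y)[1:] / np.diff(y)[:-1]
assert ratios.max() <= 1.2, "stretching too aggressive for the viscous layer"
print(f"outer edge at y = {y[-1]:.3e}, max stretching ratio = {ratios.max():.2f}")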

Some recent applications of using the Zeus Navier–Stokes analysis system include the prediction of Reynolds number effects on tail effectiveness, shown in Fig. 19. CFD results captured the effect of Reynolds number on horizontal tail boundary layer health and on tail effectiveness quite well.

Another application is the simulation of vortex generators on a complete airplane configuration [52] as shown in Fig. 20. The effects of vortex generators on airplane pitch characteristics are shown. Again, the results compare reasonably well with flight data with respect to predicting airplane pitch characteristics, even at relatively high angles of attack where the flow is massively separated. The CFD solution also provides flowfield details that illustrate the flow physics behind how vortex generators work to improve high-speed handling characteristics, a very useful tool for design engineers in selecting and placing vortex generators on lifting surfaces.

The second structured grid Navier–Stokes method uses the overset grid approach, whereby a flowfield grid is generated for each component of the configuration independently. Each grid overlaps with one or more of the others, and communication between the various grids is achieved through numerical interpolation in the overlap regions. The advantage of this approach is that each component of the configuration is relatively simple, so a high-quality local grid can be easily generated. However, one pays the price of performing complex 3D interpolation, with some risk of degrading overall numerical accuracy. The OVERFLOW code [43], used at Boeing for high-speed and high-lift configuration analysis, belongs to this overset/overlap structured grid approach. Fig. 21 shows the overset grids and the OVERFLOW solution of a complex high-lift system, including all high-lift components of the airplane [53]. Results agree well with experimental data for low to moderate angles of attack. At high angles of attack, there are complex flow separations in the flap and slat gap regions, which could not be simulated adequately with current one- or two-equation turbulence models. Improvements in turbulence models for separated flow simulation, as well as in Navier–Stokes solver accuracy and robustness, are essential for reliable prediction of airplane high-lift performance, as well as airplane pitch characteristics.
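
The interpolation step is typically low order. For example, a trilinear donor-cell interpolation blends the eight corner values of the donor cell using the receiver point's local coordinates; a minimal sketch follows (the donor search, usually the hard part, is omitted):

import numpy as np

def trilinear(corner_vals, xi, eta, zeta):
    # corner_vals[i, j, k] holds the donor-cell value at corner (i, j, k),
    # each index in {0, 1}; (xi, eta, zeta) are the receiver point's local
    # coordinates within the donor cell, each in [0, 1]
    c = corner_vals
    c = c[0] * (1 - xi) + c[1] * xi          # collapse the i direction
    c = c[0] * (1 - eta) + c[1] * eta        # collapse the j direction
    return c[0] * (1 - zeta) + c[1] * zeta   # collapse the k direction

vals = np.arange(8, dtype=float).reshape(2, 2, 2)
print(trilinear(vals, 0.5, 0.5, 0.5))        # 3.5: the cell-centroid average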

Another important element for the successful use of Navier–Stokes technology in airplane design and analysis is the availability of high-performance computing. All Navier–Stokes codes require large memory and many CPU hours to resolve viscous flows over an airplane configuration. The rapid development of parallel computing hardware and software, as well as of PC clusters with large numbers of CPUs, has made the use of Navier–Stokes technology in practical airplane design and analysis a reality. The analysis of an airplane configuration with 16 vortex generators on each side of the wing involves approximately 25 million grid points. Using 56 CPUs on an SGI Origin 2000 machine, the CFD solution for each flight condition can be obtained within 11 h of flow time.

4.4.2. Unstructured grid codes––Fluent, NSU2D/3D, CFD++

The structured grid Navier–Stokes codes make highly efficient use of computer memory and processing power due to the well-ordered data structure used in the solution algorithm. However, they suffer from two major drawbacks: the lack of flexibility in handling complex geometry, and the difficulty of implementing solution adaptive gridding. These requirements, namely complex geometry handling and solution adaptive capability, are essential for accurate and reliable prediction of airplane design and off-design performance. Consequently, it is less common and often more difficult to use CFD to analyze geometrically complex parts of the airplane, such as high-lift systems (flaps and slats), engine compartments, auxiliary power units, and so on. Paradoxically, the success of CFD in designing major components has eliminated many of the experiments that previously provided a ‘‘piggyback'' opportunity to test these complicated devices. Consequently, there is an increased need to compute airflows around and through systems that are distinguished by very complex geometry and flow patterns. In the last decade, there has been impressive progress in unstructured grid Navier–Stokes code development [54–57]. Boeing Commercial Airplanes has explored and used Fluent, the more recent unstructured grid Navier–Stokes codes NSU2D/NSU3D of Mavriplis [54], and CFD++ of Chakravarthy [57] for 2D and 3D high-lift analysis with success.

A recent application of unstructured grid technology involved the use of Fluent V5 [58] to investigate the behavior of the efflux from engine thrust reversers [59]. A typical commercial airplane deploys its thrust reversers briefly after touchdown. A piece of engine cowling translates aft and blocker doors drop down, directing the engine airflow into a honeycomb structure called a cascade. The cascade directs the flow forward, which acts to slow the aircraft and decrease lift for more effective braking. There are some critical design considerations in properly directing the reversed flow. The reverser is used precisely at the time when the high-lift devices, the wing leading and trailing edge flaps and slats, are fully deployed. Consequently, the plumes of hot exhaust must be directed so as not to impinge on these devices. In addition, the plumes should not hit the fuselage or other parts of the aircraft. Moreover, reingestion (in which the reversed plume reenters the engine inlet), engine ingestion of debris blown up from the runway, and plume envelopment of the vertical tail (which affects directional control) must be avoided. To avoid these effects, it is important for designers to know exactly where the exhaust plumes go.

The Tetra module of grid generation software from ICEM CFD Engineering [60] has been used to obtain fully unstructured meshes. Starting from a new airplane geometry (with cleaned-up lofts), these meshes can be created in a day or two. The grid generation software contains a replay capability so that minor changes to the geometry can be remeshed quickly. Because the entire CFD analysis cycle can be completed in about three days, designers can use this tool repeatedly as a way to optimize the design. In this way, it is possible to map the performance of the reverser against the power setting of the reversed engine fan and the airplane forward speed. Tests that involve geometry changes, such as the repositioning of the cascades or the nacelle relative to the wing, or variation of the cascade angles, can be accomplished with minimal remeshing and analysis. Wind tunnel testing and expense are reduced, but the key benefits are really time and risk mitigation. If a need to change the design should become apparent after the tooling was built and the aircraft was in test, the delay in entry into service and the expense of retooling would be unacceptable. The grid and engine reverser efflux particle traces from one of these cases are illustrated in Fig. 22. Fluent is in widespread use at Boeing for other geometrically complex problems, such as cooling flows in engine compartments and dispersion of fire suppression chemicals.

4.4.3. Other Navier–Stokes codes

The Propulsion Analysis group at Boeing Commercial Airplanes has long acquired, supported, and used a number of other Navier–Stokes codes. The present authors are not qualified to describe this activity; however, we do wish to mention some of the codes involved. These include the Boeing-named Mach3 code based on the implicit predictor-corrector methodology of MacCormack [61], the PARC code [62] of NASA Lewis, the WIND code [63], and BCFD [64], which is scheduled to be the platform for an Enterprise common Navier–Stokes code. These codes have been used for nacelle inlet analysis and design and for nacelle fan and core cowl nozzle performance studies [64,65].

4.4.4. Next generation Navier–Stokes codes

The successful application of Navier–Stokes codes during the last 10 years has raised expectations among Boeing engineers that CFD can become a routine tool for the loads analysis, stability and control analysis, and high-lift design processes. In fact, there is considerable speculation that it may be possible to populate databases involving tens of thousands of cases with results from Navier–Stokes CFD codes, if dramatic improvements in computing affordability continue over the next five years. For the first time, the affordability per Navier–Stokes data point may rival that of a wind tunnel generated data point. Of course, project engineers use CFD and wind tunnel data in a complementary fashion, so cost is not a competitive issue here. Before Navier–Stokes codes can be routinely used to populate databases, however, accuracy, reliability, efficiency, and usability issues need to be addressed. Gaps in data, inconsistent data, and long acquisition times seriously degrade the utility of a database. Even with current user aids, the application of Navier–Stokes codes to new configurations generally requires the services of an expert user. The generation of a ‘‘good grid'' is still somewhat of an art and often quite labor intensive. Although everyone realizes that a ‘‘good grid'' is necessary for accuracy and even convergence, there is no precise definition of what constitutes one. In fact, the definition would probably vary from code to code and is certainly case dependent. Usability problems are reflected in the fact that although Navier–Stokes codes are now considered capable of generating more accurate results, they are used far less frequently than TRANAIR at Boeing Commercial Airplanes.

Much of the current effort to improve the usability of our Navier–Stokes codes would have to be termed evolutionary. As is always the case with evolutionary improvements, it is necessary to determine whether or not incremental improvements are approaching a horizontal asymptote, while implementation costs are mounting. Boeing is currently involved in an effort to reevaluate the current technology and explore alternatives, much the same as was done 20 years ago in the case of potential flow. The project is called General Geometry Navier–Stokes Solver (GGNS).

From our TRANAIR experience, it seems rather evident that solution adaptive grids must be an essential feature for reliability and usability. This is especially true when computing flows at off-design conditions where our understanding of the flow physics is limited, making it difficult to generate ‘‘good grids’’. However, these grids must now be anisotropic and, more than likely, quite irregular. This places a huge burden on improving discretization fidelity, as current discretization algorithms do not seem to do well with irregular spacings and cell shapes. Higher order elements are certainly desirable for efficiency's sake and for capturing latent features. However, stabilization and limiter technologies need to be advanced to handle such elements. Current solvers are relatively weak, and convergence is often incomplete, especially when turbulent transport equations are involved. Some of these issues are addressed in detail elsewhere [66]. It should be noted that our reevaluation and development work here is a joint effort between the CFD developers at Boeing and their colleagues at the Boeing Technical Research Center in Moscow. We also note there are related efforts going on elsewhere. We mention in particular the FAAST project at NASA Langley.

4.5. Design and optimization methods

4.5.1. A555, A619 inverse design codes

Most existing CFD codes are analysis tools (i.e., given a configuration, the codes predict aerodynamic characteristics of the configuration). In airplane design, one would also like tools that provide design capability (i.e., given desired aerodynamic characteristics, the codes generate realistic geometry). The design method used by Henne [67], which prescribes wing surface pressures and employs an iterative method to find the corresponding geometry, was one of the very first inverse design methods used in the airplane industry. Boeing Commercial Airplanes developed a similar method for wing design in the A555 code [68], illustrated in Fig. 23. This code was used extensively on the 7J7, 777, and 737NG programs. The code borrowed heavily from the A488 system to ensure usability in the fast-paced airplane development environment. On the Boeing 777 program, CFD contributed to a high degree of confidence in performance with only a three-cycle wing development program. Significantly fewer wing designs were tested for the 777 than for the earlier 757 and 767 programs. Without the ‘‘inverse design'' CFD capability of A555, the final design would have had to be 21% thinner. Such a wing would not have been manufacturable, due to skin gages being too thick for the automatic riveting machines in the factory, and it would have had less fuel volume. Conversely, if the wing could meet the skin gage and fuel volume requirements, the cruise Mach number would have had to be significantly slower. In either case, the airplane would not have achieved customer satisfaction. The effect of CFD wing design in this case was an airplane that has dominated sales in its class since being offered to the airlines.
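
The flavor of such an inverse method can be shown with a toy residual-correction loop: prescribe a target pressure distribution, analyze the current geometry, and feed the mismatch back into the geometry with under-relaxation. The ‘‘solver'' below is a thin-airfoil-style surrogate, Cp = -2 dy/dx, chosen only to keep the sketch self-contained; it is not the A555 formulation.

import numpy as np
from scipy.integrate import cumulative_trapezoid

x = np.linspace(0.0, 1.0, 201)
solve_cp = lambda y: -2.0 * np.gradient(y, x)   # stand-in analysis code

y_target = 0.1 * np.sin(np.pi * x)              # geometry we pretend not to know
cp_target = solve_cp(y_target)                  # prescribed target pressures

y = np.zeros_like(x)                            # initial geometry
omega = 0.5                                     # under-relaxation factor
for it in range(50):
    dcp = solve_cp(y) - cp_target
    if np.abs(dcp).max() < 1e-6:
        break
    # Pressure mismatch fed back as a geometry correction (integrated once,
    # since the surrogate relates Cp to the surface slope)
    y += omega * 0.5 * cumulative_trapezoid(dcp, x, initial=0.0)

print(f"max |Cp - Cp_target| after {it} iterations:", np.abs(dcp).max())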

More recently, Campbell [69] introduced a constrained, direct, iterative, surface curvature method (CDISC) for wing design. The method has been incorporated into both the structured grid single-block Navier–Stokes code A619 [70] and the overset grid code OVERFLOW/OVERDISC at Boeing. Both codes are in use for configuration design in the product development organization.

4.5.2. TRANAIR optimization

Because of its boundary condition generality, and in particular the use of transpiration to simulate surface movement, the TRANAIR code could easily have been substituted into the existing Boeing standard inverse aerodynamic design process, A555. However, the process itself had a number of issues. First and foremost was the difficulty of finding ‘‘good'' pressure distributions for highly 3D flows. Such pressure distributions needed to result in acceptable off-design performance as well as low cruise drag. Although many rules of thumb were developed through the years, only a few highly experienced aerodynamicists could create acceptable distributions on a routine basis. Second, it was never clear whether the resultant designs were in fact optimal, a question of some importance in a highly competitive environment. Third, multidisciplinary constraints often had to be imposed after the fact, leading to a highly iterative and time-consuming process, as well as potentially suboptimal designs.

A serendipitous result of the decision to use a powerful sparse solver to converge the TRANAIR analysis cases was the ability to rapidly generate solution sensitivities. In a sense, each sensitivity represented just another right-hand side for the already decomposed analysis Jacobian matrix to solve. In addition, the adaptive grid capability allowed accurate tracking of changes in critical flow features predicted by these sensitivities. Formally, it was an easy matter to feed the sensitivities into an optimization driver such as NPSOL [71] and systematize the design process as illustrated in Fig. 24. However, optimization codes have been notorious for promising spectacular results and then falling flat because of overly simplistic mathematical realizations of the problems. Aerodynamic design requires understanding of very complicated geometric, flow, and interdisciplinary constraints. These constraints are rather nebulous and often exist only in the minds of the designers. An initial optimization capability using TRANAIR was available in 1992 [72], but it took several more years before project users were willing to trust their design processes to optimization [73]. A wide variety of payoff functions and constraints were built into TRANAIR, but the one component of a payoff function that users were really interested in was, of course, drag. Consequently, a great deal of effort was invested in numerical work to improve TRANAIR's drag calculations. Careful studies in the mid-1990s [74] then validated the ability of TRANAIR to compute accurate drag increments for subsonic transports.
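
The mechanics are easy to sketch: at a converged analysis, du/da = -J^{-1} (dR/da), so each design variable costs one extra back-substitution with the already factored Jacobian, and the assembled gradient is handed to the optimization driver (NPSOL in TRANAIR; generic placeholders below). All matrices here are random stand-ins for the real analysis quantities.

import numpy as np
from scipy.linalg import lu_factor, lu_solve

rng = np.random.default_rng(1)
n_state, n_dv = 300, 12
J = rng.normal(size=(n_state, n_state)) + n_state * np.eye(n_state)  # dR/du
dR_da = rng.normal(size=(n_state, n_dv))                             # dR/da
dD_du = rng.normal(size=n_state)    # partial of drag w.r.t. flow state
dD_da = rng.normal(size=n_dv)       # partial of drag w.r.t. design variables

lu = lu_factor(J)   # already available from the converged analysis

# One back-substitution per design variable gives the columns of du/da
du_da = np.column_stack([lu_solve(lu, -dR_da[:, k]) for k in range(n_dv)])

# Total derivative of the payoff function, ready for the optimization driver
grad_D = dD_du @ du_da + dD_da

# With many design variables, a single adjoint solve with J^T is cheaper
psi = lu_solve(lu, dD_du, trans=1)          # solves J^T psi = dD/du
assert np.allclose(grad_D, dD_da - psi @ dR_da)
print(grad_D)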

At the same time, a multipoint optimization capability was introduced, since it was well understood that drag minimization at a single flight condition was somewhat ill-posed and often led to unacceptable off-design characteristics. Moreover, users desired the capability to simultaneously optimize slightly different configurations having major portions of their geometries in common. By 1997, TRANAIR optimization had replaced inverse design as the preferred aerodynamic design process for flight conditions where full potential/boundary layer modeling is applicable. At the current time, the code can handle as many as 600 geometry degrees of freedom and 45,000 nonlinear inequalities. These inequalities represent the pointwise application of roughly 25 different types of flow and geometry constraints. The code has seen extensive use in the design of a large variety of configurations covering the Mach range from transonic to Mach 2.4. This has contributed (in several cases critically) to detailed development studies for a number of vehicles, some of which are illustrated in Fig. 25.

TRANAIR design/optimization applications that have affected a product include the payload fairing on the Sea Launch rocket, nacelle fan cowl for the GE90-115B engine, and the process used to determine ‘‘Reduced Vertical Separation Minimums’’ compliance for new and in-service aircraft.

5. Conclusions

During the last 30 years at Boeing Commercial Airplanes, Seattle, CFD has evolved into a highly valued tool for the design, analysis, and support of cost-effective and high-performing commercial transports. The application of CFD today has revolutionized the process of aerodynamic design, and CFD has joined the wind tunnel and flight test as a critical tool of the trade. This did not have to be the case; CFD could have easily remained a somewhat interesting tool with modest value in the hands of an expert as a means to assess problems arising from time to time. As the reader can gather from the previous sections, there are many reasons that this did not happen. The one we would like to emphasize in this Conclusion section is the fact that Boeing recognized the leverage in getting CFD into the hands of the project engineers and was willing to do all the things necessary to make it happen.



n5321 | 2025年7月29日 20:58