# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
-nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
+ccflags-y += -I$(src)/core/include
+ccflags-y += -I$(src)
+
+nouveau-y := core/core/mm.o
+nouveau-y += core/core/ramht.o
+
+nouveau-y += core/subdev/fb/nv04.o
+nouveau-y += core/subdev/fb/nv10.o
+nouveau-y += core/subdev/fb/nv20.o
+nouveau-y += core/subdev/fb/nv30.o
+nouveau-y += core/subdev/fb/nv40.o
+nouveau-y += core/subdev/fb/nv50.o
+nouveau-y += core/subdev/fb/nvc0.o
+nouveau-y += core/subdev/fb/nv50_vram.o
+nouveau-y += core/subdev/fb/nvc0_vram.o
+nouveau-y += core/subdev/gpio/base.o
+nouveau-y += core/subdev/gpio/nv10.o
+nouveau-y += core/subdev/gpio/nv50.o
+nouveau-y += core/subdev/i2c/base.o
+nouveau-y += core/subdev/instmem/nv04.o
+nouveau-y += core/subdev/instmem/nv50.o
+nouveau-y += core/subdev/instmem/nvc0.o
+nouveau-y += core/subdev/mc/nv04.o
+nouveau-y += core/subdev/mc/nv40.o
+nouveau-y += core/subdev/mc/nv50.o
+nouveau-y += core/subdev/timer/nv04.o
+nouveau-y += core/subdev/vm/base.o
+nouveau-y += core/subdev/vm/nv50.o
+nouveau-y += core/subdev/vm/nvc0.o
+
+nouveau-y += core/engine/bsp/nv84.o
+nouveau-y += core/engine/copy/nva3.o
+nouveau-y += core/engine/copy/nvc0.o
+nouveau-y += core/engine/crypt/nv84.o
+nouveau-y += core/engine/crypt/nv98.o
+nouveau-y += core/engine/fifo/nv04.o
+nouveau-y += core/engine/fifo/nv10.o
+nouveau-y += core/engine/fifo/nv17.o
+nouveau-y += core/engine/fifo/nv40.o
+nouveau-y += core/engine/fifo/nv50.o
+nouveau-y += core/engine/fifo/nv84.o
+nouveau-y += core/engine/fifo/nvc0.o
+nouveau-y += core/engine/fifo/nve0.o
+nouveau-y += core/engine/graph/ctxnv40.o
+nouveau-y += core/engine/graph/ctxnv50.o
+nouveau-y += core/engine/graph/ctxnvc0.o
+nouveau-y += core/engine/graph/ctxnve0.o
+nouveau-y += core/engine/graph/nv04.o
+nouveau-y += core/engine/graph/nv10.o
+nouveau-y += core/engine/graph/nv20.o
+nouveau-y += core/engine/graph/nv40.o
+nouveau-y += core/engine/graph/nv50.o
+nouveau-y += core/engine/graph/nvc0.o
+nouveau-y += core/engine/graph/nve0.o
+nouveau-y += core/engine/mpeg/nv31.o
+nouveau-y += core/engine/mpeg/nv50.o
+nouveau-y += core/engine/ppp/nv98.o
+nouveau-y += core/engine/vp/nv84.o
+
+nouveau-y += nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_gpuobj.o nouveau_irq.o nouveau_notifier.o \
nouveau_sgdma.o nouveau_dma.o nouveau_util.o \
nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
- nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
+ nouveau_hw.o nouveau_calc.o \
nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
- nouveau_hdmi.o nouveau_dp.o nouveau_ramht.o \
+ nouveau_hdmi.o nouveau_dp.o \
nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
- nouveau_mm.o nouveau_vm.o nouveau_mxm.o nouveau_gpio.o \
+ nouveau_mxm.o \
nouveau_abi16.o \
- nv04_timer.o \
- nv04_mc.o nv40_mc.o nv50_mc.o \
- nv04_fb.o nv10_fb.o nv20_fb.o nv30_fb.o nv40_fb.o \
- nv50_fb.o nvc0_fb.o \
- nv04_fifo.o nv10_fifo.o nv17_fifo.o nv40_fifo.o nv50_fifo.o \
- nv84_fifo.o nvc0_fifo.o nve0_fifo.o \
+ nouveau_bios.o \
nv04_fence.o nv10_fence.o nv84_fence.o nvc0_fence.o \
nv04_software.o nv50_software.o nvc0_software.o \
- nv04_graph.o nv10_graph.o nv20_graph.o \
- nv40_graph.o nv50_graph.o nvc0_graph.o nve0_graph.o \
- nv40_grctx.o nv50_grctx.o nvc0_grctx.o nve0_grctx.o \
- nv84_crypt.o nv98_crypt.o \
- nva3_copy.o nvc0_copy.o \
- nv31_mpeg.o nv50_mpeg.o \
- nv84_bsp.o \
- nv84_vp.o \
- nv98_ppp.o \
- nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
nv04_crtc.o nv04_display.o nv04_cursor.o \
nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o \
nvd0_display.o \
nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
- nv10_gpio.o nv50_gpio.o \
nv50_calc.o \
nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
- nv50_vram.o nvc0_vram.o \
- nv50_vm.o nvc0_vm.o nouveau_prime.o
+ nouveau_prime.o
nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <core/mm.h>
+
+static inline void
+region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
+{
+ list_del(&a->nl_entry);
+ list_del(&a->fl_entry);
+ kfree(a);
+}
+
+static struct nouveau_mm_node *
+region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
+{
+ struct nouveau_mm_node *b;
+
+ if (a->length == size)
+ return a;
+
+ b = kmalloc(sizeof(*b), GFP_KERNEL);
+ if (unlikely(b == NULL))
+ return NULL;
+
+ b->offset = a->offset;
+ b->length = size;
+ b->type = a->type;
+ a->offset += size;
+ a->length -= size;
+ list_add_tail(&b->nl_entry, &a->nl_entry);
+ if (b->type == 0)
+ list_add_tail(&b->fl_entry, &a->fl_entry);
+ return b;
+}
+
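+/* Fetch the node adjacent to "root" on the offset-ordered node list, or NULL
+ * when the list boundary is reached; used below to look at a node's
+ * neighbours when merging and aligning regions.
+ */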
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
+ list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
+
+void
+nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
+{
+ struct nouveau_mm_node *prev = node(this, prev);
+ struct nouveau_mm_node *next = node(this, next);
+
+ list_add(&this->fl_entry, &mm->free);
+ this->type = 0;
+
+ if (prev && prev->type == 0) {
+ prev->length += this->length;
+ region_put(mm, this);
+ this = prev;
+ }
+
+ if (next && next->type == 0) {
+ next->offset = this->offset;
+ next->length += this->length;
+ region_put(mm, this);
+ }
+}
+
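+/* Allocate a region from the free list.  "size" is the preferred length and
+ * "size_nc", when non-zero, is the smallest length the caller will accept,
+ * which lets callers assemble a non-contiguous allocation out of repeated
+ * calls.  "align" must be a power of two.  The region is tagged with "type";
+ * where it borders a region of a different type, the edge is aligned to the
+ * heap's block_size.
+ */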
+int
+nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
+ u32 align, struct nouveau_mm_node **pnode)
+{
+ struct nouveau_mm_node *prev, *this, *next;
+ u32 min = size_nc ? size_nc : size;
+ u32 align_mask = align - 1;
+ u32 splitoff;
+ u32 s, e;
+
+ list_for_each_entry(this, &mm->free, fl_entry) {
+ e = this->offset + this->length;
+ s = this->offset;
+
+ prev = node(this, prev);
+ if (prev && prev->type != type)
+ s = roundup(s, mm->block_size);
+
+ next = node(this, next);
+ if (next && next->type != type)
+ e = rounddown(e, mm->block_size);
+
+ s = (s + align_mask) & ~align_mask;
+ e &= ~align_mask;
+ if (s > e || e - s < min)
+ continue;
+
+ splitoff = s - this->offset;
+ if (splitoff && !region_split(mm, this, splitoff))
+ return -ENOMEM;
+
+ this = region_split(mm, this, min(size, e - s));
+ if (!this)
+ return -ENOMEM;
+
+ this->type = type;
+ list_del(&this->fl_entry);
+ *pnode = this;
+ return 0;
+ }
+
+ return -ENOSPC;
+}
+
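+/* Add the range [offset, offset + length), rounded inward to block_size, to
+ * the heap.  When "block" is non-zero the heap is initialised first; when it
+ * is zero the range is simply appended to an already-initialised heap as an
+ * additional free node.
+ */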
+int
+nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
+{
+ struct nouveau_mm_node *node;
+
+ if (block) {
+ mutex_init(&mm->mutex);
+ INIT_LIST_HEAD(&mm->nodes);
+ INIT_LIST_HEAD(&mm->free);
+ mm->block_size = block;
+ mm->heap_nodes = 0;
+ }
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+ node->offset = roundup(offset, mm->block_size);
+ node->length = rounddown(offset + length, mm->block_size) - node->offset;
+
+ list_add_tail(&node->nl_entry, &mm->nodes);
+ list_add_tail(&node->fl_entry, &mm->free);
+ mm->heap_nodes++;
+ return 0;
+}
+
+int
+nouveau_mm_fini(struct nouveau_mm *mm)
+{
+ struct nouveau_mm_node *node, *heap =
+ list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
+ int nodes = 0;
+
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ if (nodes++ == mm->heap_nodes) {
+ printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
+ list_for_each_entry(node, &mm->nodes, nl_entry) {
+ printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
+ node->type, node->offset, node->length);
+ }
+ WARN_ON(1);
+ return -EBUSY;
+ }
+ }
+
+ kfree(heap);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include <core/ramht.h>
+
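+/* Fold the 32-bit object handle into a RAMHT index: the handle is split into
+ * ramht->bits sized chunks which are XORed together, the channel id is XORed
+ * in on pre-NV50 chipsets, and the result is scaled by the 8-byte entry size.
+ */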
+static u32
+nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_ramht *ramht = chan->ramht;
+ u32 hash = 0;
+ int i;
+
+ NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
+
+ for (i = 32; i > 0; i -= ramht->bits) {
+ hash ^= (handle & ((1 << ramht->bits) - 1));
+ handle >>= ramht->bits;
+ }
+
+ if (dev_priv->card_type < NV_50)
+ hash ^= chan->id << (ramht->bits - 4);
+ hash <<= 3;
+
+ NV_DEBUG(dev, "hash=0x%08x\n", hash);
+ return hash;
+}
+
+static int
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+ u32 offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 ctx = nv_ro32(ramht, offset + 4);
+
+ if (dev_priv->card_type < NV_40)
+ return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+ return (ctx != 0);
+}
+
+static int
+nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
+ struct nouveau_gpuobj *ramht, u32 offset)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ u32 ctx = nv_ro32(ramht, offset + 4);
+
+ if (dev_priv->card_type >= NV_50)
+ return true;
+ else if (dev_priv->card_type >= NV_40)
+ return chan->id ==
+ ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+ else
+ return chan->id ==
+ ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+}
+
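+/* Insert (handle, context) into the channel's RAMHT.  The context word is
+ * encoded per-generation from the object's instance address, engine and
+ * channel id; collisions are resolved by linear probing in 8-byte steps,
+ * wrapping at the end of the table.  Returns -EEXIST if the handle is
+ * already bound, or -ENOMEM once every slot has been tried.
+ */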
+int
+nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
+ struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_ramht_entry *entry;
+ struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+ unsigned long flags;
+ u32 ctx, co, ho;
+
+ if (nouveau_ramht_find(chan, handle))
+ return -EEXIST;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->channel = chan;
+ entry->gpuobj = NULL;
+ entry->handle = handle;
+ nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
+
+ if (dev_priv->card_type < NV_40) {
+ ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
+ (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else
+ if (dev_priv->card_type < NV_50) {
+ ctx = (gpuobj->pinst >> 4) |
+ (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+ } else {
+ if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
+ ctx = (gpuobj->cinst << 10) |
+ (chan->id << 28) |
+ chan->id; /* HASH_TAG */
+ } else {
+ ctx = (gpuobj->cinst >> 4) |
+ ((gpuobj->engine <<
+ NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
+ }
+ }
+
+ spin_lock_irqsave(&chan->ramht->lock, flags);
+ list_add(&entry->head, &chan->ramht->entries);
+
+ co = ho = nouveau_ramht_hash_handle(chan, handle);
+ do {
+ if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+ NV_DEBUG(dev,
+ "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, handle, ctx);
+ nv_wo32(ramht, co + 0, handle);
+ nv_wo32(ramht, co + 4, ctx);
+
+ spin_unlock_irqrestore(&chan->ramht->lock, flags);
+ instmem->flush(dev);
+ return 0;
+ }
+ NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
+ chan->id, co, nv_ro32(ramht, co));
+
+ co += 8;
+ if (co >= ramht->size)
+ co = 0;
+ } while (co != ho);
+
+ NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
+ list_del(&entry->head);
+ spin_unlock_irqrestore(&chan->ramht->lock, flags);
+ kfree(entry);
+ return -ENOMEM;
+}
+
+static struct nouveau_ramht_entry *
+nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
+{
+ struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
+ struct nouveau_ramht_entry *entry;
+ unsigned long flags;
+
+ if (!ramht)
+ return NULL;
+
+ spin_lock_irqsave(&ramht->lock, flags);
+ list_for_each_entry(entry, &ramht->entries, head) {
+ if (entry->channel == chan &&
+ (!handle || entry->handle == handle)) {
+ list_del(&entry->head);
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ return entry;
+ }
+ }
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ return NULL;
+}
+
+static void
+nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+ struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+ unsigned long flags;
+ u32 co, ho;
+
+ spin_lock_irqsave(&chan->ramht->lock, flags);
+ co = ho = nouveau_ramht_hash_handle(chan, handle);
+ do {
+ if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+ nouveau_ramht_entry_same_channel(chan, ramht, co) &&
+ (handle == nv_ro32(ramht, co))) {
+ NV_DEBUG(dev,
+ "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+ chan->id, co, handle, nv_ro32(ramht, co + 4));
+ nv_wo32(ramht, co + 0, 0x00000000);
+ nv_wo32(ramht, co + 4, 0x00000000);
+ instmem->flush(dev);
+ goto out;
+ }
+
+ co += 8;
+ if (co >= ramht->size)
+ co = 0;
+ } while (co != ho);
+
+ NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
+ chan->id, handle);
+out:
+ spin_unlock_irqrestore(&chan->ramht->lock, flags);
+}
+
+int
+nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
+{
+ struct nouveau_ramht_entry *entry;
+
+ entry = nouveau_ramht_remove_entry(chan, handle);
+ if (!entry)
+ return -ENOENT;
+
+ nouveau_ramht_remove_hash(chan, entry->handle);
+ nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+ kfree(entry);
+ return 0;
+}
+
+struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
+{
+ struct nouveau_ramht *ramht = chan->ramht;
+ struct nouveau_ramht_entry *entry;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ unsigned long flags;
+
+ if (unlikely(!chan->ramht))
+ return NULL;
+
+ spin_lock_irqsave(&ramht->lock, flags);
+ list_for_each_entry(entry, &chan->ramht->entries, head) {
+ if (entry->channel == chan && entry->handle == handle) {
+ gpuobj = entry->gpuobj;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ramht->lock, flags);
+
+ return gpuobj;
+}
+
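+/* Wrap an existing gpuobj as a reference-counted hash table.  Each entry is
+ * 8 bytes, so ->bits is the log2 of the number of slots the object holds.
+ */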
+int
+nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+ struct nouveau_ramht **pramht)
+{
+ struct nouveau_ramht *ramht;
+
+ ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
+ if (!ramht)
+ return -ENOMEM;
+
+ ramht->dev = dev;
+ kref_init(&ramht->refcount);
+ ramht->bits = drm_order(gpuobj->size / 8);
+ INIT_LIST_HEAD(&ramht->entries);
+ spin_lock_init(&ramht->lock);
+ nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
+
+ *pramht = ramht;
+ return 0;
+}
+
+static void
+nouveau_ramht_del(struct kref *ref)
+{
+ struct nouveau_ramht *ramht =
+ container_of(ref, struct nouveau_ramht, refcount);
+
+ nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
+ kfree(ramht);
+}
+
+void
+nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
+ struct nouveau_channel *chan)
+{
+ struct nouveau_ramht_entry *entry;
+ struct nouveau_ramht *ramht;
+
+ if (ref)
+ kref_get(&ref->refcount);
+
+ ramht = *ptr;
+ if (ramht) {
+ while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
+ nouveau_ramht_remove_hash(chan, entry->handle);
+ nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+ kfree(entry);
+ }
+
+ kref_put(&ramht->refcount, nouveau_ramht_del);
+ }
+ *ptr = ref;
+}
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include <subdev/vm.h>
+#include <core/ramht.h>
+
+/*XXX: This stub is currently used on NV98+ as well.  As soon as it becomes
+ * more than just an enable/disable stub, it needs to be split out into
+ * nv98_bsp.c...
+ */
+
+struct nv84_bsp_engine {
+ struct nouveau_exec_engine base;
+};
+
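+/* 0x000200 is taken here to be the master engine-enable register, with bit
+ * 15 gating the BSP engine: fini powers the engine down if it is running,
+ * and init pulses the bit to reset it.
+ */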
+static int
+nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00008000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+ return 0;
+}
+
+static int
+nv84_bsp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
+ return 0;
+}
+
+static void
+nv84_bsp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, BSP);
+
+ kfree(pbsp);
+}
+
+int
+nv84_bsp_create(struct drm_device *dev)
+{
+ struct nv84_bsp_engine *pbsp;
+
+ pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
+ if (!pbsp)
+ return -ENOMEM;
+
+ pbsp->base.destroy = nv84_bsp_destroy;
+ pbsp->base.init = nv84_bsp_init;
+ pbsp->base.fini = nv84_bsp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
+ return 0;
+}
--- /dev/null
+/* fuc microcode for copy engine on nva3- chipsets
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build for nva3:nvc0
+ * m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h
+ *
+ * To build for nvc0-
+ * m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h
+ */
+
+ifdef(`NVA3',
+.section #nva3_pcopy_data
+,
+.section #nvc0_pcopy_data
+)
+
+ctx_object: .b32 0
+ifdef(`NVA3',
+ctx_dma:
+ctx_dma_query: .b32 0
+ctx_dma_src: .b32 0
+ctx_dma_dst: .b32 0
+,)
+.equ #ctx_dma_count 3
+ctx_query_address_high: .b32 0
+ctx_query_address_low: .b32 0
+ctx_query_counter: .b32 0
+ctx_src_address_high: .b32 0
+ctx_src_address_low: .b32 0
+ctx_src_pitch: .b32 0
+ctx_src_tile_mode: .b32 0
+ctx_src_xsize: .b32 0
+ctx_src_ysize: .b32 0
+ctx_src_zsize: .b32 0
+ctx_src_zoff: .b32 0
+ctx_src_xoff: .b32 0
+ctx_src_yoff: .b32 0
+ctx_src_cpp: .b32 0
+ctx_dst_address_high: .b32 0
+ctx_dst_address_low: .b32 0
+ctx_dst_pitch: .b32 0
+ctx_dst_tile_mode: .b32 0
+ctx_dst_xsize: .b32 0
+ctx_dst_ysize: .b32 0
+ctx_dst_zsize: .b32 0
+ctx_dst_zoff: .b32 0
+ctx_dst_xoff: .b32 0
+ctx_dst_yoff: .b32 0
+ctx_dst_cpp: .b32 0
+ctx_format: .b32 0
+ctx_swz_const0: .b32 0
+ctx_swz_const1: .b32 0
+ctx_xcnt: .b32 0
+ctx_ycnt: .b32 0
+.align 256
+
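+// Dispatch table layout, as consumed by the #dispatch routine below: each
+// group opens with a .b16 pair {first method index (mthd >> 2), count},
+// followed by <count> 8-byte entries.  The first word of an entry holds a
+// context offset (method data is stored as state) or, with bit 16 set, the
+// address of a command handler to call; the second word is a mask of the
+// reserved data bits, any of which being set raises INVALID_BITFIELD.  A
+// final header with base 0x800 (beyond any real method) terminates the table.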
+dispatch_table:
+// mthd 0x0000, NAME
+.b16 0x000 1
+.b32 #ctx_object ~0xffffffff
+// mthd 0x0100, NOP
+.b16 0x040 1
+.b32 0x00010000 + #cmd_nop ~0xffffffff
+// mthd 0x0140, PM_TRIGGER
+.b16 0x050 1
+.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff
+ifdef(`NVA3', `
+// mthd 0x0180-0x018c, DMA_
+.b16 0x060 #ctx_dma_count
+dispatch_dma:
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+.b32 0x00010000 + #cmd_dma ~0xffffffff
+',)
+// mthd 0x0200-0x0218, SRC_TILE
+.b16 0x80 7
+.b32 #ctx_src_tile_mode ~0x00000fff
+.b32 #ctx_src_xsize ~0x0007ffff
+.b32 #ctx_src_ysize ~0x00001fff
+.b32 #ctx_src_zsize ~0x000007ff
+.b32 #ctx_src_zoff ~0x00000fff
+.b32 #ctx_src_xoff ~0x0007ffff
+.b32 #ctx_src_yoff ~0x00001fff
+// mthd 0x0220-0x0238, DST_TILE
+.b16 0x88 7
+.b32 #ctx_dst_tile_mode ~0x00000fff
+.b32 #ctx_dst_xsize ~0x0007ffff
+.b32 #ctx_dst_ysize ~0x00001fff
+.b32 #ctx_dst_zsize ~0x000007ff
+.b32 #ctx_dst_zoff ~0x00000fff
+.b32 #ctx_dst_xoff ~0x0007ffff
+.b32 #ctx_dst_yoff ~0x00001fff
+// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
+.b16 0xc0 2
+.b32 0x00010000 + #cmd_exec ~0xffffffff
+.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
+// mthd 0x030c-0x0340, various stuff
+.b16 0xc3 14
+.b32 #ctx_src_address_high ~0x000000ff
+.b32 #ctx_src_address_low ~0xffffffff
+.b32 #ctx_dst_address_high ~0x000000ff
+.b32 #ctx_dst_address_low ~0xffffffff
+.b32 #ctx_src_pitch ~0x0007ffff
+.b32 #ctx_dst_pitch ~0x0007ffff
+.b32 #ctx_xcnt ~0x0000ffff
+.b32 #ctx_ycnt ~0x00001fff
+.b32 #ctx_format ~0x0333ffff
+.b32 #ctx_swz_const0 ~0xffffffff
+.b32 #ctx_swz_const1 ~0xffffffff
+.b32 #ctx_query_address_high ~0x000000ff
+.b32 #ctx_query_address_low ~0xffffffff
+.b32 #ctx_query_counter ~0xffffffff
+.b16 0x800 0
+
+ifdef(`NVA3',
+.section #nva3_pcopy_code
+,
+.section #nvc0_pcopy_code
+)
+
+main:
+ clear b32 $r0
+ mov $sp $r0
+
+ // setup i0 handler and route fifo and ctxswitch to it
+ mov $r1 #ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ movw $r2 0xfff3
+ sethi $r2 0
+ iowr I[$r1 + 0x300] $r2
+
+ // enable interrupts
+ or $r2 0xc
+ iowr I[$r1] $r2
+ bset $flags ie0
+
+ // enable fifo access and context switching
+ mov $r1 0x1200
+ mov $r2 3
+ iowr I[$r1] $r2
+
+ // sleep forever, waking for interrupts
+ bset $flags $p0
+ spin:
+ sleep $p0
+ bra #spin
+
+// i0 handler
+ih:
+ iord $r1 I[$r0 + 0x200]
+
+ and $r2 $r1 0x00000008
+ bra e #ih_no_chsw
+ call #chsw
+ ih_no_chsw:
+ and $r2 $r1 0x00000004
+ bra e #ih_no_cmd
+ call #dispatch
+
+ ih_no_cmd:
+ and $r1 $r1 0x0000000c
+ iowr I[$r0 + 0x100] $r1
+ iret
+
+// $p1 direction (0 = unload, 1 = load)
+// $r3 channel
+swctx:
+ mov $r4 0x7700
+ mov $xtargets $r4
+ifdef(`NVA3', `
+ // target 7 hardcoded to ctx dma object
+ mov $xdbase $r0
+', ` // NVC0
+ // read SCRATCH3 to decide if we are PCOPY0 or PCOPY1
+ mov $r4 0x2100
+ iord $r4 I[$r4 + 0]
+ and $r4 1
+ shl b32 $r4 4
+ add b32 $r4 0x30
+
+ // channel is in vram
+ mov $r15 0x61c
+ shl b32 $r15 6
+ mov $r5 0x114
+ iowrs I[$r15] $r5
+
+ // read 16-byte PCOPYn info, containing context pointer, from channel
+ shl b32 $r5 $r3 4
+ add b32 $r5 2
+ mov $xdbase $r5
+ mov $r5 $sp
+ // get a chunk of stack space, aligned to 256 byte boundary
+ sub b32 $r5 0x100
+ mov $r6 0xff
+ not b32 $r6
+ and $r5 $r6
+ sethi $r5 0x00020000
+ xdld $r4 $r5
+ xdwait
+ sethi $r5 0
+
+ // set context pointer, from within channel VM
+ mov $r14 0
+ iowrs I[$r15] $r14
+ ld b32 $r4 D[$r5 + 0]
+ shr b32 $r4 8
+ ld b32 $r6 D[$r5 + 4]
+ shl b32 $r6 24
+ or $r4 $r6
+ mov $xdbase $r4
+')
+ // 256-byte context, at start of data segment
+ mov b32 $r4 $r0
+ sethi $r4 0x60000
+
+ // swap!
+ bra $p1 #swctx_load
+ xdst $r0 $r4
+ bra #swctx_done
+ swctx_load:
+ xdld $r0 $r4
+ swctx_done:
+ xdwait
+ ret
+
+chsw:
+ // read current channel
+ mov $r2 0x1400
+ iord $r3 I[$r2]
+
+ // if it's active, unload it and return
+ xbit $r15 $r3 0x1e
+ bra e #chsw_no_unload
+ bclr $flags $p1
+ call #swctx
+ bclr $r3 0x1e
+ iowr I[$r2] $r3
+ mov $r4 1
+ iowr I[$r2 + 0x200] $r4
+ ret
+
+ // read next channel
+ chsw_no_unload:
+ iord $r3 I[$r2 + 0x100]
+
+ // is there a channel waiting to be loaded?
+ xbit $r13 $r3 0x1e
+ bra e #chsw_finish_load
+ bset $flags $p1
+ call #swctx
+ifdef(`NVA3',
+ // load dma objects back into TARGET regs
+ mov $r5 #ctx_dma
+ mov $r6 #ctx_dma_count
+ chsw_load_ctx_dma:
+ ld b32 $r7 D[$r5 + $r6 * 4]
+ add b32 $r8 $r6 0x180
+ shl b32 $r8 8
+ iowr I[$r8] $r7
+ sub b32 $r6 1
+ bra nc #chsw_load_ctx_dma
+,)
+
+ chsw_finish_load:
+ mov $r3 2
+ iowr I[$r2 + 0x200] $r3
+ ret
+
+dispatch:
+ // read incoming fifo command
+ mov $r3 0x1900
+ iord $r2 I[$r3 + 0x100]
+ iord $r3 I[$r3 + 0x000]
+ and $r4 $r2 0x7ff
+ // $r2 will be used to store exception data
+ shl b32 $r2 0x10
+
+ // lookup method in the dispatch table, ILLEGAL_MTHD if not found
+ mov $r5 #dispatch_table
+ clear b32 $r6
+ clear b32 $r7
+ dispatch_loop:
+ ld b16 $r6 D[$r5 + 0]
+ ld b16 $r7 D[$r5 + 2]
+ add b32 $r5 4
+ cmpu b32 $r4 $r6
+ bra c #dispatch_illegal_mthd
+ add b32 $r7 $r6
+ cmpu b32 $r4 $r7
+ bra c #dispatch_valid_mthd
+ sub b32 $r7 $r6
+ shl b32 $r7 3
+ add b32 $r5 $r7
+ bra #dispatch_loop
+
+ // ensure no bits set in reserved fields, INVALID_BITFIELD
+ dispatch_valid_mthd:
+ sub b32 $r4 $r6
+ shl b32 $r4 3
+ add b32 $r4 $r5
+ ld b32 $r5 D[$r4 + 4]
+ and $r5 $r3
+ cmpu b32 $r5 0
+ bra ne #dispatch_invalid_bitfield
+
+ // depending on dispatch flags: execute method, or save data as state
+ ld b16 $r5 D[$r4 + 0]
+ ld b16 $r6 D[$r4 + 2]
+ cmpu b32 $r6 0
+ bra ne #dispatch_cmd
+ st b32 D[$r5] $r3
+ bra #dispatch_done
+ dispatch_cmd:
+ bclr $flags $p1
+ call $r5
+ bra $p1 #dispatch_error
+ bra #dispatch_done
+
+ dispatch_invalid_bitfield:
+ or $r2 2
+ dispatch_illegal_mthd:
+ or $r2 1
+
+ // store exception data in SCRATCH0/SCRATCH1, signal hostirq
+ dispatch_error:
+ mov $r4 0x1000
+ iowr I[$r4 + 0x000] $r2
+ iowr I[$r4 + 0x100] $r3
+ mov $r2 0x40
+ iowr I[$r0] $r2
+ hostirq_wait:
+ iord $r2 I[$r0 + 0x200]
+ and $r2 0x40
+ cmpu b32 $r2 0
+ bra ne #hostirq_wait
+
+ dispatch_done:
+ mov $r2 0x1d00
+ mov $r3 1
+ iowr I[$r2] $r3
+ ret
+
+// No-operation
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_nop:
+ ret
+
+// PM_TRIGGER
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_pm_trigger:
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x20000
+ iowr I[$r2] $r3
+ ret
+
+ifdef(`NVA3',
+// SET_DMA_* method handler
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_dma:
+ sub b32 $r4 #dispatch_dma
+ shr b32 $r4 1
+ bset $r3 0x1e
+ st b32 D[$r4 + #ctx_dma] $r3
+ add b32 $r4 0x600
+ shl b32 $r4 6
+ iowr I[$r4] $r3
+ ret
+,)
+
+// Calculates the hw swizzle mask and adjusts the surface's xcnt to match
+//
+cmd_exec_set_format:
+ // zero out a chunk of the stack to store the swizzle into
+ add $sp -0x10
+ st b32 D[$sp + 0x00] $r0
+ st b32 D[$sp + 0x04] $r0
+ st b32 D[$sp + 0x08] $r0
+ st b32 D[$sp + 0x0c] $r0
+
+ // extract cpp, src_ncomp and dst_ncomp from FORMAT
+ ld b32 $r4 D[$r0 + #ctx_format]
+ extr $r5 $r4 16:17
+ add b32 $r5 1
+ extr $r6 $r4 20:21
+ add b32 $r6 1
+ extr $r7 $r4 24:25
+ add b32 $r7 1
+
+ // convert FORMAT swizzle mask to hw swizzle mask
+ bclr $flags $p2
+ clear b32 $r8
+ clear b32 $r9
+ ncomp_loop:
+ and $r10 $r4 0xf
+ shr b32 $r4 4
+ clear b32 $r11
+ bpc_loop:
+ cmpu b8 $r10 4
+ bra nc #cmp_c0
+ mulu $r12 $r10 $r5
+ add b32 $r12 $r11
+ bset $flags $p2
+ bra #bpc_next
+ cmp_c0:
+ bra ne #cmp_c1
+ mov $r12 0x10
+ add b32 $r12 $r11
+ bra #bpc_next
+ cmp_c1:
+ cmpu b8 $r10 6
+ bra nc #cmp_zero
+ mov $r12 0x14
+ add b32 $r12 $r11
+ bra #bpc_next
+ cmp_zero:
+ mov $r12 0x80
+ bpc_next:
+ st b8 D[$sp + $r8] $r12
+ add b32 $r8 1
+ add b32 $r11 1
+ cmpu b32 $r11 $r5
+ bra c #bpc_loop
+ add b32 $r9 1
+ cmpu b32 $r9 $r7
+ bra c #ncomp_loop
+
+ // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
+ mulu $r6 $r5
+ st b32 D[$r0 + #ctx_src_cpp] $r6
+ ld b32 $r8 D[$r0 + #ctx_xcnt]
+ mulu $r6 $r8
+ bra $p2 #dst_xcnt
+ clear b32 $r6
+
+ dst_xcnt:
+ mulu $r7 $r5
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
+ mulu $r7 $r8
+
+ mov $r5 0x810
+ shl b32 $r5 6
+ iowr I[$r5 + 0x000] $r6
+ iowr I[$r5 + 0x100] $r7
+ add b32 $r5 0x800
+ ld b32 $r6 D[$r0 + #ctx_dst_cpp]
+ sub b32 $r6 1
+ shl b32 $r6 8
+ ld b32 $r7 D[$r0 + #ctx_src_cpp]
+ sub b32 $r7 1
+ or $r6 $r7
+ iowr I[$r5 + 0x000] $r6
+ add b32 $r5 0x100
+ ld b32 $r6 D[$sp + 0x00]
+ iowr I[$r5 + 0x000] $r6
+ ld b32 $r6 D[$sp + 0x04]
+ iowr I[$r5 + 0x100] $r6
+ ld b32 $r6 D[$sp + 0x08]
+ iowr I[$r5 + 0x200] $r6
+ ld b32 $r6 D[$sp + 0x0c]
+ iowr I[$r5 + 0x300] $r6
+ add b32 $r5 0x400
+ ld b32 $r6 D[$r0 + #ctx_swz_const0]
+ iowr I[$r5 + 0x000] $r6
+ ld b32 $r6 D[$r0 + #ctx_swz_const1]
+ iowr I[$r5 + 0x100] $r6
+ add $sp 0x10
+ ret
+
+// Setup to handle a tiled surface
+//
+// Calculates a number of parameters the hardware requires in order
+// to correctly handle tiling.
+//
+// Offset calculation is performed as follows (Tp/Th/Td from TILE_MODE):
+// nTx = round_up(w * cpp, 1 << Tp) >> Tp
+// nTy = round_up(h, 1 << Th) >> Th
+// Txo = (x * cpp) & ((1 << Tp) - 1)
+// Tx = (x * cpp) >> Tp
+// Tyo = y & ((1 << Th) - 1)
+// Ty = y >> Th
+// Tzo = z & ((1 << Td) - 1)
+// Tz = z >> Td
+//
+// off = (Tzo << Tp << Th) + (Tyo << Tp) + Txo
+// off += ((Tz * nTy * nTx)) + (Ty * nTx) + Tx) << Td << Th << Tp;
+//
+// Inputs:
+// $r4: hw command (0x104800)
+// $r5: ctx offset adjustment for src/dst selection
+// $p2: set if dst surface
+//
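+// Purely illustrative example with made-up values: Tp=8, Th=4, Td=1, cpp=4,
+// w=1024, h=64, x=100, y=37, z=0 gives
+//   nTx = round_up(4096, 256) >> 8 = 16    nTy = round_up(64, 16) >> 4 = 4
+//   Txo = 400 & 255 = 144                  Tx  = 400 >> 8 = 1
+//   Tyo = 37 & 15 = 5                      Ty  = 37 >> 4 = 2
+//   Tzo = 0                                Tz  = 0
+//   off  = (0 << 8 << 4) + (5 << 8) + 144 = 1424
+//   off += ((0 * 4 * 16) + (2 * 16) + 1) << 1 << 4 << 8 = 33 << 13 = 270336
+// for a final byte offset of 271760.
+//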
+cmd_exec_set_surface_tiled:
+ // translate TILE_MODE into Tp, Th, Td shift values
+ ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
+ extr $r9 $r7 8:11
+ extr $r8 $r7 4:7
+ifdef(`NVA3',
+ add b32 $r8 2
+,
+ add b32 $r8 3
+)
+ extr $r7 $r7 0:3
+ cmp b32 $r7 0xe
+ bra ne #xtile64
+ mov $r7 4
+ bra #xtileok
+ xtile64:
+ xbit $r7 $flags $p2
+ add b32 $r7 17
+ bset $r4 $r7
+ mov $r7 6
+ xtileok:
+
+ // Op = (x * cpp) & ((1 << Tp) - 1)
+ // Tx = (x * cpp) >> Tp
+ ld b32 $r10 D[$r5 + #ctx_src_xoff]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
+ mulu $r10 $r11
+ mov $r11 1
+ shl b32 $r11 $r7
+ sub b32 $r11 1
+ and $r12 $r10 $r11
+ shr b32 $r10 $r7
+
+ // Tyo = y & ((1 << Th) - 1)
+ // Ty = y >> Th
+ ld b32 $r13 D[$r5 + #ctx_src_yoff]
+ mov $r14 1
+ shl b32 $r14 $r8
+ sub b32 $r14 1
+ and $r11 $r13 $r14
+ shr b32 $r13 $r8
+
+ // YTILE = ((1 << Th) << 12) | ((1 << Th) - Tyo)
+ add b32 $r14 1
+ shl b32 $r15 $r14 12
+ sub b32 $r14 $r11
+ or $r15 $r14
+ xbit $r6 $flags $p2
+ add b32 $r6 0x208
+ shl b32 $r6 8
+ iowr I[$r6 + 0x000] $r15
+
+ // Op += Tyo << Tp
+ shl b32 $r11 $r7
+ add b32 $r12 $r11
+
+ // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
+ ld b32 $r15 D[$r5 + #ctx_src_xsize]
+ ld b32 $r11 D[$r5 + #ctx_src_cpp]
+ mulu $r15 $r11
+ mov $r11 1
+ shl b32 $r11 $r7
+ sub b32 $r11 1
+ add b32 $r15 $r11
+ shr b32 $r15 $r7
+ push $r15
+
+ // nTy = (h + ((1 << Th) - 1)) >> Th
+ ld b32 $r15 D[$r5 + #ctx_src_ysize]
+ mov $r11 1
+ shl b32 $r11 $r8
+ sub b32 $r11 1
+ add b32 $r15 $r11
+ shr b32 $r15 $r8
+ push $r15
+
+ // Tys = Tp + Th
+ // CFG_YZ_TILE_SIZE = ((1 << Th) >> 2) << Td
+ add b32 $r7 $r8
+ sub b32 $r8 2
+ mov $r11 1
+ shl b32 $r11 $r8
+ shl b32 $r11 $r9
+
+ // Tzo = z & ((1 << Td) - 1)
+ // Tz = z >> Td
+ // Op += Tzo << Tys
+ // Ts = Tys + Td
+ ld b32 $r8 D[$r5 + #ctx_src_zoff]
+ mov $r14 1
+ shl b32 $r14 $r9
+ sub b32 $r14 1
+ and $r15 $r8 $r14
+ shl b32 $r15 $r7
+ add b32 $r12 $r15
+ add b32 $r7 $r9
+ shr b32 $r8 $r9
+
+ // Ot = ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Ts
+ pop $r15
+ pop $r9
+ mulu $r13 $r9
+ add b32 $r10 $r13
+ mulu $r8 $r9
+ mulu $r8 $r15
+ add b32 $r10 $r8
+ shl b32 $r10 $r7
+
+ // PITCH = (nTx - 1) << Ts
+ sub b32 $r9 1
+ shl b32 $r9 $r7
+ iowr I[$r6 + 0x200] $r9
+
+ // SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
+ // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
+ ld b32 $r8 D[$r5 + #ctx_src_address_high]
+ add b32 $r10 $r12
+ add b32 $r7 $r10
+ adc b32 $r8 0
+ shl b32 $r8 16
+ or $r8 $r11
+ sub b32 $r6 0x600
+ iowr I[$r6 + 0x000] $r7
+ add b32 $r6 0x400
+ iowr I[$r6 + 0x000] $r8
+ ret
+
+// Setup to handle a linear surface
+//
+// Nothing to see here.. Sets ADDRESS and PITCH, pretty non-exciting
+//
+cmd_exec_set_surface_linear:
+ xbit $r6 $flags $p2
+ add b32 $r6 0x202
+ shl b32 $r6 8
+ ld b32 $r7 D[$r5 + #ctx_src_address_low]
+ iowr I[$r6 + 0x000] $r7
+ add b32 $r6 0x400
+ ld b32 $r7 D[$r5 + #ctx_src_address_high]
+ shl b32 $r7 16
+ iowr I[$r6 + 0x000] $r7
+ add b32 $r6 0x400
+ ld b32 $r7 D[$r5 + #ctx_src_pitch]
+ iowr I[$r6 + 0x000] $r7
+ ret
+
+// wait for regs to be available for use
+cmd_exec_wait:
+ push $r0
+ push $r1
+ mov $r0 0x800
+ shl b32 $r0 6
+ loop:
+ iord $r1 I[$r0]
+ and $r1 1
+ bra ne #loop
+ pop $r1
+ pop $r0
+ ret
+
+cmd_exec_query:
+ // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
+ xbit $r4 $r3 13
+ bra ne #query_counter
+ call #cmd_exec_wait
+ mov $r4 0x80c
+ shl b32 $r4 6
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
+ add b32 $r5 4
+ iowr I[$r4 + 0x000] $r5
+ iowr I[$r4 + 0x100] $r0
+ mov $r5 0xc
+ iowr I[$r4 + 0x200] $r5
+ add b32 $r4 0x400
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
+ shl b32 $r5 16
+ iowr I[$r4 + 0x000] $r5
+ add b32 $r4 0x500
+ mov $r5 0x00000b00
+ sethi $r5 0x00010000
+ iowr I[$r4 + 0x000] $r5
+ mov $r5 0x00004040
+ shl b32 $r5 1
+ sethi $r5 0x80800000
+ iowr I[$r4 + 0x100] $r5
+ mov $r5 0x00001110
+ sethi $r5 0x13120000
+ iowr I[$r4 + 0x200] $r5
+ mov $r5 0x00001514
+ sethi $r5 0x17160000
+ iowr I[$r4 + 0x300] $r5
+ mov $r5 0x00002601
+ sethi $r5 0x00010000
+ mov $r4 0x800
+ shl b32 $r4 6
+ iowr I[$r4 + 0x000] $r5
+
+ // write COUNTER
+ query_counter:
+ call #cmd_exec_wait
+ mov $r4 0x80c
+ shl b32 $r4 6
+ ld b32 $r5 D[$r0 + #ctx_query_address_low]
+ iowr I[$r4 + 0x000] $r5
+ iowr I[$r4 + 0x100] $r0
+ mov $r5 0x4
+ iowr I[$r4 + 0x200] $r5
+ add b32 $r4 0x400
+ ld b32 $r5 D[$r0 + #ctx_query_address_high]
+ shl b32 $r5 16
+ iowr I[$r4 + 0x000] $r5
+ add b32 $r4 0x500
+ mov $r5 0x00000300
+ iowr I[$r4 + 0x000] $r5
+ mov $r5 0x00001110
+ sethi $r5 0x13120000
+ iowr I[$r4 + 0x100] $r5
+ ld b32 $r5 D[$r0 + #ctx_query_counter]
+ add b32 $r4 0x500
+ iowr I[$r4 + 0x000] $r5
+ mov $r5 0x00002601
+ sethi $r5 0x00010000
+ mov $r4 0x800
+ shl b32 $r4 6
+ iowr I[$r4 + 0x000] $r5
+ ret
+
+// Execute a copy operation
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+// 000002000 QUERY_SHORT
+// 000001000 QUERY
+// 000000100 DST_LINEAR
+// 000000010 SRC_LINEAR
+// 000000001 FORMAT
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_exec:
+ call #cmd_exec_wait
+
+ // if format requested, call function to calculate it, otherwise
+ // fill in cpp/xcnt for both surfaces as if (cpp == 1)
+ xbit $r15 $r3 0
+ bra e #cmd_exec_no_format
+ call #cmd_exec_set_format
+ mov $r4 0x200
+ bra #cmd_exec_init_src_surface
+ cmd_exec_no_format:
+ mov $r6 0x810
+ shl b32 $r6 6
+ mov $r7 1
+ st b32 D[$r0 + #ctx_src_cpp] $r7
+ st b32 D[$r0 + #ctx_dst_cpp] $r7
+ ld b32 $r7 D[$r0 + #ctx_xcnt]
+ iowr I[$r6 + 0x000] $r7
+ iowr I[$r6 + 0x100] $r7
+ clear b32 $r4
+
+ cmd_exec_init_src_surface:
+ bclr $flags $p2
+ clear b32 $r5
+ xbit $r15 $r3 4
+ bra e #src_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_init_dst_surface
+ src_tiled:
+ call #cmd_exec_set_surface_tiled
+ bset $r4 7
+
+ cmd_exec_init_dst_surface:
+ bset $flags $p2
+ mov $r5 #ctx_dst_address_high - #ctx_src_address_high
+ xbit $r15 $r3 8
+ bra e #dst_tiled
+ call #cmd_exec_set_surface_linear
+ bra #cmd_exec_kick
+ dst_tiled:
+ call #cmd_exec_set_surface_tiled
+ bset $r4 8
+
+ cmd_exec_kick:
+ mov $r5 0x800
+ shl b32 $r5 6
+ ld b32 $r6 D[$r0 + #ctx_ycnt]
+ iowr I[$r5 + 0x100] $r6
+ mov $r6 0x0041
+ // SRC_TARGET = 1, DST_TARGET = 2
+ sethi $r6 0x44000000
+ or $r4 $r6
+ iowr I[$r5] $r4
+
+ // if requested, queue up a QUERY write after the copy has completed
+ xbit $r15 $r3 12
+ bra e #cmd_exec_done
+ call #cmd_exec_query
+
+ cmd_exec_done:
+ ret
+
+// Flush write cache
+//
+// Inputs:
+// $r1: irqh state
+// $r2: hostirq state
+// $r3: data
+// $r4: dispatch table entry
+// Outputs:
+// $r1: irqh state
+// $p1: set on error
+// $r2: hostirq state
+// $r3: data
+cmd_wrcache_flush:
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x10000
+ iowr I[$r2] $r3
+ ret
+
+.align 0x100
--- /dev/null
+u32 nva3_pcopy_data[] = {
+/* 0x0000: ctx_object */
+ 0x00000000,
+/* 0x0004: ctx_dma */
+/* 0x0004: ctx_dma_query */
+ 0x00000000,
+/* 0x0008: ctx_dma_src */
+ 0x00000000,
+/* 0x000c: ctx_dma_dst */
+ 0x00000000,
+/* 0x0010: ctx_query_address_high */
+ 0x00000000,
+/* 0x0014: ctx_query_address_low */
+ 0x00000000,
+/* 0x0018: ctx_query_counter */
+ 0x00000000,
+/* 0x001c: ctx_src_address_high */
+ 0x00000000,
+/* 0x0020: ctx_src_address_low */
+ 0x00000000,
+/* 0x0024: ctx_src_pitch */
+ 0x00000000,
+/* 0x0028: ctx_src_tile_mode */
+ 0x00000000,
+/* 0x002c: ctx_src_xsize */
+ 0x00000000,
+/* 0x0030: ctx_src_ysize */
+ 0x00000000,
+/* 0x0034: ctx_src_zsize */
+ 0x00000000,
+/* 0x0038: ctx_src_zoff */
+ 0x00000000,
+/* 0x003c: ctx_src_xoff */
+ 0x00000000,
+/* 0x0040: ctx_src_yoff */
+ 0x00000000,
+/* 0x0044: ctx_src_cpp */
+ 0x00000000,
+/* 0x0048: ctx_dst_address_high */
+ 0x00000000,
+/* 0x004c: ctx_dst_address_low */
+ 0x00000000,
+/* 0x0050: ctx_dst_pitch */
+ 0x00000000,
+/* 0x0054: ctx_dst_tile_mode */
+ 0x00000000,
+/* 0x0058: ctx_dst_xsize */
+ 0x00000000,
+/* 0x005c: ctx_dst_ysize */
+ 0x00000000,
+/* 0x0060: ctx_dst_zsize */
+ 0x00000000,
+/* 0x0064: ctx_dst_zoff */
+ 0x00000000,
+/* 0x0068: ctx_dst_xoff */
+ 0x00000000,
+/* 0x006c: ctx_dst_yoff */
+ 0x00000000,
+/* 0x0070: ctx_dst_cpp */
+ 0x00000000,
+/* 0x0074: ctx_format */
+ 0x00000000,
+/* 0x0078: ctx_swz_const0 */
+ 0x00000000,
+/* 0x007c: ctx_swz_const1 */
+ 0x00000000,
+/* 0x0080: ctx_xcnt */
+ 0x00000000,
+/* 0x0084: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00010040,
+ 0x00010160,
+ 0x00000000,
+ 0x00010050,
+ 0x00010162,
+ 0x00000000,
+ 0x00030060,
+/* 0x0128: dispatch_dma */
+ 0x00010170,
+ 0x00000000,
+ 0x00010170,
+ 0x00000000,
+ 0x00010170,
+ 0x00000000,
+ 0x00070080,
+ 0x00000028,
+ 0xfffff000,
+ 0x0000002c,
+ 0xfff80000,
+ 0x00000030,
+ 0xffffe000,
+ 0x00000034,
+ 0xfffff800,
+ 0x00000038,
+ 0xfffff000,
+ 0x0000003c,
+ 0xfff80000,
+ 0x00000040,
+ 0xffffe000,
+ 0x00070088,
+ 0x00000054,
+ 0xfffff000,
+ 0x00000058,
+ 0xfff80000,
+ 0x0000005c,
+ 0xffffe000,
+ 0x00000060,
+ 0xfffff800,
+ 0x00000064,
+ 0xfffff000,
+ 0x00000068,
+ 0xfff80000,
+ 0x0000006c,
+ 0xffffe000,
+ 0x000200c0,
+ 0x00010492,
+ 0x00000000,
+ 0x0001051b,
+ 0x00000000,
+ 0x000e00c3,
+ 0x0000001c,
+ 0xffffff00,
+ 0x00000020,
+ 0x00000000,
+ 0x00000048,
+ 0xffffff00,
+ 0x0000004c,
+ 0x00000000,
+ 0x00000024,
+ 0xfff80000,
+ 0x00000050,
+ 0xfff80000,
+ 0x00000080,
+ 0xffff0000,
+ 0x00000084,
+ 0xffffe000,
+ 0x00000074,
+ 0xfccc0000,
+ 0x00000078,
+ 0x00000000,
+ 0x0000007c,
+ 0x00000000,
+ 0x00000010,
+ 0xffffff00,
+ 0x00000014,
+ 0x00000000,
+ 0x00000018,
+ 0x00000000,
+ 0x00000800,
+};
+
+u32 nva3_pcopy_code[] = {
+/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+ 0xf1040017,
+ 0xf0fff327,
+ 0x12d00023,
+ 0x0c25f0c0,
+ 0xf40012d0,
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
+/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
+/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
+/* 0x0041: ih_no_chsw */
+ 0x0412c472,
+ 0xf4060bf4,
+/* 0x004a: ih_no_cmd */
+ 0x11c4c321,
+ 0x4001d00c,
+/* 0x0052: swctx */
+ 0x47f101f8,
+ 0x4bfe7700,
+ 0x0007fe00,
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
+/* 0x006b: swctx_load */
+ 0xfa060ef4,
+/* 0x006e: swctx_done */
+ 0x03f80504,
+/* 0x0072: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+ 0xf4170bf4,
+ 0x21f40132,
+ 0x1e3af052,
+ 0xf00023d0,
+ 0x24d00147,
+/* 0x0093: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x220bf41e,
+ 0xf40131f4,
+ 0x57f05221,
+ 0x0367f004,
+/* 0x00a8: chsw_load_ctx_dma */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
+/* 0x00bb: chsw_finish_load */
+ 0xf0f018f4,
+ 0x23d00237,
+/* 0x00c3: dispatch */
+ 0xf100f880,
+ 0xcf190037,
+ 0x33cf4032,
+ 0xff24e400,
+ 0x1024b607,
+ 0x010057f1,
+ 0x74bd64bd,
+/* 0x00dc: dispatch_loop */
+ 0x58005658,
+ 0x50b60157,
+ 0x0446b804,
+ 0xbb4d08f4,
+ 0x47b80076,
+ 0x0f08f404,
+ 0xb60276bb,
+ 0x57bb0374,
+ 0xdf0ef400,
+/* 0x0100: dispatch_valid_mthd */
+ 0xb60246bb,
+ 0x45bb0344,
+ 0x01459800,
+ 0xb00453fd,
+ 0x1bf40054,
+ 0x00455820,
+ 0xb0014658,
+ 0x1bf40064,
+ 0x00538009,
+/* 0x0127: dispatch_cmd */
+ 0xf4300ef4,
+ 0x55f90132,
+ 0xf40c01f4,
+/* 0x0132: dispatch_invalid_bitfield */
+ 0x25f0250e,
+/* 0x0135: dispatch_illegal_mthd */
+ 0x0125f002,
+/* 0x0138: dispatch_error */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x27f04043,
+ 0x0002d040,
+/* 0x0148: hostirq_wait */
+ 0xf08002cf,
+ 0x24b04024,
+ 0xf71bf400,
+/* 0x0154: dispatch_done */
+ 0x1d0027f1,
+ 0xd00137f0,
+ 0x00f80023,
+/* 0x0160: cmd_nop */
+/* 0x0162: cmd_pm_trigger */
+ 0x27f100f8,
+ 0x34bd2200,
+ 0xd00233f0,
+ 0x00f80023,
+/* 0x0170: cmd_dma */
+ 0x012842b7,
+ 0xf00145b6,
+ 0x43801e39,
+ 0x0040b701,
+ 0x0644b606,
+ 0xf80043d0,
+/* 0x0189: cmd_exec_set_format */
+ 0xf030f400,
+ 0xb00001b0,
+ 0x01b00101,
+ 0x0301b002,
+ 0xc71d0498,
+ 0x50b63045,
+ 0x3446c701,
+ 0xc70160b6,
+ 0x70b63847,
+ 0x0232f401,
+ 0x94bd84bd,
+/* 0x01b4: ncomp_loop */
+ 0xb60f4ac4,
+ 0xb4bd0445,
+/* 0x01bc: bpc_loop */
+ 0xf404a430,
+ 0xa5ff0f18,
+ 0x00cbbbc0,
+ 0xf40231f4,
+/* 0x01ce: cmp_c0 */
+ 0x1bf4220e,
+ 0x10c7f00c,
+ 0xf400cbbb,
+/* 0x01da: cmp_c1 */
+ 0xa430160e,
+ 0x0c18f406,
+ 0xbb14c7f0,
+ 0x0ef400cb,
+/* 0x01e9: cmp_zero */
+ 0x80c7f107,
+/* 0x01ed: bpc_next */
+ 0x01c83800,
+ 0xb60180b6,
+ 0xb5b801b0,
+ 0xc308f404,
+ 0xb80190b6,
+ 0x08f40497,
+ 0x0065fdb2,
+ 0x98110680,
+ 0x68fd2008,
+ 0x0502f400,
+/* 0x0216: dst_xcnt */
+ 0x75fd64bd,
+ 0x1c078000,
+ 0xf10078fd,
+ 0xb6081057,
+ 0x56d00654,
+ 0x4057d000,
+ 0x080050b7,
+ 0xb61c0698,
+ 0x64b60162,
+ 0x11079808,
+ 0xfd0172b6,
+ 0x56d00567,
+ 0x0050b700,
+ 0x0060b401,
+ 0xb40056d0,
+ 0x56d00160,
+ 0x0260b440,
+ 0xb48056d0,
+ 0x56d00360,
+ 0x0050b7c0,
+ 0x1e069804,
+ 0x980056d0,
+ 0x56d01f06,
+ 0x1030f440,
+/* 0x0276: cmd_exec_set_surface_tiled */
+ 0x579800f8,
+ 0x6879c70a,
+ 0xb66478c7,
+ 0x77c70280,
+ 0x0e76b060,
+ 0xf0091bf4,
+ 0x0ef40477,
+/* 0x0291: xtile64 */
+ 0x027cf00f,
+ 0xfd1170b6,
+ 0x77f00947,
+/* 0x029d: xtileok */
+ 0x0f5a9806,
+ 0xfd115b98,
+ 0xb7f000ab,
+ 0x04b7bb01,
+ 0xff01b2b6,
+ 0xa7bbc4ab,
+ 0x105d9805,
+ 0xbb01e7f0,
+ 0xe2b604e8,
+ 0xb4deff01,
+ 0xb605d8bb,
+ 0xef9401e0,
+ 0x02ebbb0c,
+ 0xf005fefd,
+ 0x60b7026c,
+ 0x64b60208,
+ 0x006fd008,
+ 0xbb04b7bb,
+ 0x5f9800cb,
+ 0x115b980b,
+ 0xf000fbfd,
+ 0xb7bb01b7,
+ 0x01b2b604,
+ 0xbb00fbbb,
+ 0xf0f905f7,
+ 0xf00c5f98,
+ 0xb8bb01b7,
+ 0x01b2b604,
+ 0xbb00fbbb,
+ 0xf0f905f8,
+ 0xb60078bb,
+ 0xb7f00282,
+ 0x04b8bb01,
+ 0x9804b9bb,
+ 0xe7f00e58,
+ 0x04e9bb01,
+ 0xff01e2b6,
+ 0xf7bbf48e,
+ 0x00cfbb04,
+ 0xbb0079bb,
+ 0xf0fc0589,
+ 0xd9fd90fc,
+ 0x00adbb00,
+ 0xfd0089fd,
+ 0xa8bb008f,
+ 0x04a7bb00,
+ 0xbb0192b6,
+ 0x69d00497,
+ 0x08579880,
+ 0xbb075898,
+ 0x7abb00ac,
+ 0x0081b600,
+ 0xfd1084b6,
+ 0x62b7058b,
+ 0x67d00600,
+ 0x0060b700,
+ 0x0068d004,
+/* 0x0382: cmd_exec_set_surface_linear */
+ 0x6cf000f8,
+ 0x0260b702,
+ 0x0864b602,
+ 0xd0085798,
+ 0x60b70067,
+ 0x57980400,
+ 0x1074b607,
+ 0xb70067d0,
+ 0x98040060,
+ 0x67d00957,
+/* 0x03ab: cmd_exec_wait */
+ 0xf900f800,
+ 0xf110f900,
+ 0xb6080007,
+/* 0x03b6: loop */
+ 0x01cf0604,
+ 0x0114f000,
+ 0xfcfa1bf4,
+ 0xf800fc10,
+/* 0x03c5: cmd_exec_query */
+ 0x0d34c800,
+ 0xf5701bf4,
+ 0xf103ab21,
+ 0xb6080c47,
+ 0x05980644,
+ 0x0450b605,
+ 0xd00045d0,
+ 0x57f04040,
+ 0x8045d00c,
+ 0x040040b7,
+ 0xb6040598,
+ 0x45d01054,
+ 0x0040b700,
+ 0x0057f105,
+ 0x0153f00b,
+ 0xf10045d0,
+ 0xb6404057,
+ 0x53f10154,
+ 0x45d08080,
+ 0x1057f140,
+ 0x1253f111,
+ 0x8045d013,
+ 0x151457f1,
+ 0x171653f1,
+ 0xf1c045d0,
+ 0xf0260157,
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
+/* 0x0438: query_counter */
+ 0x03ab21f5,
+ 0x080c47f1,
+ 0x980644b6,
+ 0x45d00505,
+ 0x4040d000,
+ 0xd00457f0,
+ 0x40b78045,
+ 0x05980400,
+ 0x1054b604,
+ 0xb70045d0,
+ 0xf1050040,
+ 0xd0030057,
+ 0x57f10045,
+ 0x53f11110,
+ 0x45d01312,
+ 0x06059840,
+ 0x050040b7,
+ 0xf10045d0,
+ 0xf0260157,
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
+/* 0x0492: cmd_exec */
+ 0x21f500f8,
+ 0x3fc803ab,
+ 0x0e0bf400,
+ 0x018921f5,
+ 0x020047f1,
+/* 0x04a7: cmd_exec_no_format */
+ 0xf11e0ef4,
+ 0xb6081067,
+ 0x77f00664,
+ 0x11078001,
+ 0x981c0780,
+ 0x67d02007,
+ 0x4067d000,
+/* 0x04c2: cmd_exec_init_src_surface */
+ 0x32f444bd,
+ 0xc854bd02,
+ 0x0bf4043f,
+ 0x8221f50a,
+ 0x0a0ef403,
+/* 0x04d4: src_tiled */
+ 0x027621f5,
+/* 0x04db: cmd_exec_init_dst_surface */
+ 0xf40749f0,
+ 0x57f00231,
+ 0x083fc82c,
+ 0xf50a0bf4,
+ 0xf4038221,
+/* 0x04ee: dst_tiled */
+ 0x21f50a0e,
+ 0x49f00276,
+/* 0x04f5: cmd_exec_kick */
+ 0x0057f108,
+ 0x0654b608,
+ 0xd0210698,
+ 0x67f04056,
+ 0x0063f141,
+ 0x0546fd44,
+ 0xc80054d0,
+ 0x0bf40c3f,
+ 0xc521f507,
+/* 0x0519: cmd_exec_done */
+/* 0x051b: cmd_wrcache_flush */
+ 0xf100f803,
+ 0xbd220027,
+ 0x0133f034,
+ 0xf80023d0,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
--- /dev/null
+u32 nvc0_pcopy_data[] = {
+/* 0x0000: ctx_object */
+ 0x00000000,
+/* 0x0004: ctx_query_address_high */
+ 0x00000000,
+/* 0x0008: ctx_query_address_low */
+ 0x00000000,
+/* 0x000c: ctx_query_counter */
+ 0x00000000,
+/* 0x0010: ctx_src_address_high */
+ 0x00000000,
+/* 0x0014: ctx_src_address_low */
+ 0x00000000,
+/* 0x0018: ctx_src_pitch */
+ 0x00000000,
+/* 0x001c: ctx_src_tile_mode */
+ 0x00000000,
+/* 0x0020: ctx_src_xsize */
+ 0x00000000,
+/* 0x0024: ctx_src_ysize */
+ 0x00000000,
+/* 0x0028: ctx_src_zsize */
+ 0x00000000,
+/* 0x002c: ctx_src_zoff */
+ 0x00000000,
+/* 0x0030: ctx_src_xoff */
+ 0x00000000,
+/* 0x0034: ctx_src_yoff */
+ 0x00000000,
+/* 0x0038: ctx_src_cpp */
+ 0x00000000,
+/* 0x003c: ctx_dst_address_high */
+ 0x00000000,
+/* 0x0040: ctx_dst_address_low */
+ 0x00000000,
+/* 0x0044: ctx_dst_pitch */
+ 0x00000000,
+/* 0x0048: ctx_dst_tile_mode */
+ 0x00000000,
+/* 0x004c: ctx_dst_xsize */
+ 0x00000000,
+/* 0x0050: ctx_dst_ysize */
+ 0x00000000,
+/* 0x0054: ctx_dst_zsize */
+ 0x00000000,
+/* 0x0058: ctx_dst_zoff */
+ 0x00000000,
+/* 0x005c: ctx_dst_xoff */
+ 0x00000000,
+/* 0x0060: ctx_dst_yoff */
+ 0x00000000,
+/* 0x0064: ctx_dst_cpp */
+ 0x00000000,
+/* 0x0068: ctx_format */
+ 0x00000000,
+/* 0x006c: ctx_swz_const0 */
+ 0x00000000,
+/* 0x0070: ctx_swz_const1 */
+ 0x00000000,
+/* 0x0074: ctx_xcnt */
+ 0x00000000,
+/* 0x0078: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+ 0x00010040,
+ 0x0001019f,
+ 0x00000000,
+ 0x00010050,
+ 0x000101a1,
+ 0x00000000,
+ 0x00070080,
+ 0x0000001c,
+ 0xfffff000,
+ 0x00000020,
+ 0xfff80000,
+ 0x00000024,
+ 0xffffe000,
+ 0x00000028,
+ 0xfffff800,
+ 0x0000002c,
+ 0xfffff000,
+ 0x00000030,
+ 0xfff80000,
+ 0x00000034,
+ 0xffffe000,
+ 0x00070088,
+ 0x00000048,
+ 0xfffff000,
+ 0x0000004c,
+ 0xfff80000,
+ 0x00000050,
+ 0xffffe000,
+ 0x00000054,
+ 0xfffff800,
+ 0x00000058,
+ 0xfffff000,
+ 0x0000005c,
+ 0xfff80000,
+ 0x00000060,
+ 0xffffe000,
+ 0x000200c0,
+ 0x000104b8,
+ 0x00000000,
+ 0x00010541,
+ 0x00000000,
+ 0x000e00c3,
+ 0x00000010,
+ 0xffffff00,
+ 0x00000014,
+ 0x00000000,
+ 0x0000003c,
+ 0xffffff00,
+ 0x00000040,
+ 0x00000000,
+ 0x00000018,
+ 0xfff80000,
+ 0x00000044,
+ 0xfff80000,
+ 0x00000074,
+ 0xffff0000,
+ 0x00000078,
+ 0xffffe000,
+ 0x00000068,
+ 0xfccc0000,
+ 0x0000006c,
+ 0x00000000,
+ 0x00000070,
+ 0x00000000,
+ 0x00000004,
+ 0xffffff00,
+ 0x00000008,
+ 0x00000000,
+ 0x0000000c,
+ 0x00000000,
+ 0x00000800,
+};
+
+u32 nvc0_pcopy_code[] = {
+/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+ 0xf1040017,
+ 0xf0fff327,
+ 0x12d00023,
+ 0x0c25f0c0,
+ 0xf40012d0,
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
+/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
+/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
+/* 0x0041: ih_no_chsw */
+ 0x0412c4ca,
+ 0xf5070bf4,
+/* 0x004b: ih_no_cmd */
+ 0xc4010221,
+ 0x01d00c11,
+/* 0x0053: swctx */
+ 0xf101f840,
+ 0xfe770047,
+ 0x47f1004b,
+ 0x44cf2100,
+ 0x0144f000,
+ 0xb60444b6,
+ 0xf7f13040,
+ 0xf4b6061c,
+ 0x1457f106,
+ 0x00f5d101,
+ 0xb6043594,
+ 0x57fe0250,
+ 0x0145fe00,
+ 0x010052b7,
+ 0x00ff67f1,
+ 0x56fd60bd,
+ 0x0253f004,
+ 0xf80545fa,
+ 0x0053f003,
+ 0xd100e7f0,
+ 0x549800fe,
+ 0x0845b600,
+ 0xb6015698,
+ 0x46fd1864,
+ 0x0047fe05,
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
+/* 0x00c3: swctx_load */
+ 0xfa060ef4,
+/* 0x00c6: swctx_done */
+ 0x03f80504,
+/* 0x00ca: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+ 0xf4170bf4,
+ 0x21f40132,
+ 0x1e3af053,
+ 0xf00023d0,
+ 0x24d00147,
+/* 0x00eb: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x090bf41e,
+ 0xf40131f4,
+/* 0x00fa: chsw_finish_load */
+ 0x37f05321,
+ 0x8023d002,
+/* 0x0102: dispatch */
+ 0x37f100f8,
+ 0x32cf1900,
+ 0x0033cf40,
+ 0x07ff24e4,
+ 0xf11024b6,
+ 0xbd010057,
+/* 0x011b: dispatch_loop */
+ 0x5874bd64,
+ 0x57580056,
+ 0x0450b601,
+ 0xf40446b8,
+ 0x76bb4d08,
+ 0x0447b800,
+ 0xbb0f08f4,
+ 0x74b60276,
+ 0x0057bb03,
+/* 0x013f: dispatch_valid_mthd */
+ 0xbbdf0ef4,
+ 0x44b60246,
+ 0x0045bb03,
+ 0xfd014598,
+ 0x54b00453,
+ 0x201bf400,
+ 0x58004558,
+ 0x64b00146,
+ 0x091bf400,
+ 0xf4005380,
+/* 0x0166: dispatch_cmd */
+ 0x32f4300e,
+ 0xf455f901,
+ 0x0ef40c01,
+/* 0x0171: dispatch_invalid_bitfield */
+ 0x0225f025,
+/* 0x0174: dispatch_illegal_mthd */
+/* 0x0177: dispatch_error */
+ 0xf10125f0,
+ 0xd0100047,
+ 0x43d00042,
+ 0x4027f040,
+/* 0x0187: hostirq_wait */
+ 0xcf0002d0,
+ 0x24f08002,
+ 0x0024b040,
+/* 0x0193: dispatch_done */
+ 0xf1f71bf4,
+ 0xf01d0027,
+ 0x23d00137,
+/* 0x019f: cmd_nop */
+ 0xf800f800,
+/* 0x01a1: cmd_pm_trigger */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00233,
+/* 0x01af: cmd_exec_set_format */
+ 0xf400f800,
+ 0x01b0f030,
+ 0x0101b000,
+ 0xb00201b0,
+ 0x04980301,
+ 0x3045c71a,
+ 0xc70150b6,
+ 0x60b63446,
+ 0x3847c701,
+ 0xf40170b6,
+ 0x84bd0232,
+/* 0x01da: ncomp_loop */
+ 0x4ac494bd,
+ 0x0445b60f,
+/* 0x01e2: bpc_loop */
+ 0xa430b4bd,
+ 0x0f18f404,
+ 0xbbc0a5ff,
+ 0x31f400cb,
+ 0x220ef402,
+/* 0x01f4: cmp_c0 */
+ 0xf00c1bf4,
+ 0xcbbb10c7,
+ 0x160ef400,
+/* 0x0200: cmp_c1 */
+ 0xf406a430,
+ 0xc7f00c18,
+ 0x00cbbb14,
+/* 0x020f: cmp_zero */
+ 0xf1070ef4,
+/* 0x0213: bpc_next */
+ 0x380080c7,
+ 0x80b601c8,
+ 0x01b0b601,
+ 0xf404b5b8,
+ 0x90b6c308,
+ 0x0497b801,
+ 0xfdb208f4,
+ 0x06800065,
+ 0x1d08980e,
+ 0xf40068fd,
+ 0x64bd0502,
+/* 0x023c: dst_xcnt */
+ 0x800075fd,
+ 0x78fd1907,
+ 0x1057f100,
+ 0x0654b608,
+ 0xd00056d0,
+ 0x50b74057,
+ 0x06980800,
+ 0x0162b619,
+ 0x980864b6,
+ 0x72b60e07,
+ 0x0567fd01,
+ 0xb70056d0,
+ 0xb4010050,
+ 0x56d00060,
+ 0x0160b400,
+ 0xb44056d0,
+ 0x56d00260,
+ 0x0360b480,
+ 0xb7c056d0,
+ 0x98040050,
+ 0x56d01b06,
+ 0x1c069800,
+ 0xf44056d0,
+ 0x00f81030,
+/* 0x029c: cmd_exec_set_surface_tiled */
+ 0xc7075798,
+ 0x78c76879,
+ 0x0380b664,
+ 0xb06077c7,
+ 0x1bf40e76,
+ 0x0477f009,
+/* 0x02b7: xtile64 */
+ 0xf00f0ef4,
+ 0x70b6027c,
+ 0x0947fd11,
+/* 0x02c3: xtileok */
+ 0x980677f0,
+ 0x5b980c5a,
+ 0x00abfd0e,
+ 0xbb01b7f0,
+ 0xb2b604b7,
+ 0xc4abff01,
+ 0x9805a7bb,
+ 0xe7f00d5d,
+ 0x04e8bb01,
+ 0xff01e2b6,
+ 0xd8bbb4de,
+ 0x01e0b605,
+ 0xbb0cef94,
+ 0xfefd02eb,
+ 0x026cf005,
+ 0x020860b7,
+ 0xd00864b6,
+ 0xb7bb006f,
+ 0x00cbbb04,
+ 0x98085f98,
+ 0xfbfd0e5b,
+ 0x01b7f000,
+ 0xb604b7bb,
+ 0xfbbb01b2,
+ 0x05f7bb00,
+ 0x5f98f0f9,
+ 0x01b7f009,
+ 0xb604b8bb,
+ 0xfbbb01b2,
+ 0x05f8bb00,
+ 0x78bbf0f9,
+ 0x0282b600,
+ 0xbb01b7f0,
+ 0xb9bb04b8,
+ 0x0b589804,
+ 0xbb01e7f0,
+ 0xe2b604e9,
+ 0xf48eff01,
+ 0xbb04f7bb,
+ 0x79bb00cf,
+ 0x0589bb00,
+ 0x90fcf0fc,
+ 0xbb00d9fd,
+ 0x89fd00ad,
+ 0x008ffd00,
+ 0xbb00a8bb,
+ 0x92b604a7,
+ 0x0497bb01,
+ 0x988069d0,
+ 0x58980557,
+ 0x00acbb04,
+ 0xb6007abb,
+ 0x84b60081,
+ 0x058bfd10,
+ 0x060062b7,
+ 0xb70067d0,
+ 0xd0040060,
+ 0x00f80068,
+/* 0x03a8: cmd_exec_set_surface_linear */
+ 0xb7026cf0,
+ 0xb6020260,
+ 0x57980864,
+ 0x0067d005,
+ 0x040060b7,
+ 0xb6045798,
+ 0x67d01074,
+ 0x0060b700,
+ 0x06579804,
+ 0xf80067d0,
+/* 0x03d1: cmd_exec_wait */
+ 0xf900f900,
+ 0x0007f110,
+ 0x0604b608,
+/* 0x03dc: loop */
+ 0xf00001cf,
+ 0x1bf40114,
+ 0xfc10fcfa,
+/* 0x03eb: cmd_exec_query */
+ 0xc800f800,
+ 0x1bf40d34,
+ 0xd121f570,
+ 0x0c47f103,
+ 0x0644b608,
+ 0xb6020598,
+ 0x45d00450,
+ 0x4040d000,
+ 0xd00c57f0,
+ 0x40b78045,
+ 0x05980400,
+ 0x1054b601,
+ 0xb70045d0,
+ 0xf1050040,
+ 0xf00b0057,
+ 0x45d00153,
+ 0x4057f100,
+ 0x0154b640,
+ 0x808053f1,
+ 0xf14045d0,
+ 0xf1111057,
+ 0xd0131253,
+ 0x57f18045,
+ 0x53f11514,
+ 0x45d01716,
+ 0x0157f1c0,
+ 0x0153f026,
+ 0x080047f1,
+ 0xd00644b6,
+/* 0x045e: query_counter */
+ 0x21f50045,
+ 0x47f103d1,
+ 0x44b6080c,
+ 0x02059806,
+ 0xd00045d0,
+ 0x57f04040,
+ 0x8045d004,
+ 0x040040b7,
+ 0xb6010598,
+ 0x45d01054,
+ 0x0040b700,
+ 0x0057f105,
+ 0x0045d003,
+ 0x111057f1,
+ 0x131253f1,
+ 0x984045d0,
+ 0x40b70305,
+ 0x45d00500,
+ 0x0157f100,
+ 0x0153f026,
+ 0x080047f1,
+ 0xd00644b6,
+ 0x00f80045,
+/* 0x04b8: cmd_exec */
+ 0x03d121f5,
+ 0xf4003fc8,
+ 0x21f50e0b,
+ 0x47f101af,
+ 0x0ef40200,
+/* 0x04cd: cmd_exec_no_format */
+ 0x1067f11e,
+ 0x0664b608,
+ 0x800177f0,
+ 0x07800e07,
+ 0x1d079819,
+ 0xd00067d0,
+ 0x44bd4067,
+/* 0x04e8: cmd_exec_init_src_surface */
+ 0xbd0232f4,
+ 0x043fc854,
+ 0xf50a0bf4,
+ 0xf403a821,
+/* 0x04fa: src_tiled */
+ 0x21f50a0e,
+ 0x49f0029c,
+/* 0x0501: cmd_exec_init_dst_surface */
+ 0x0231f407,
+ 0xc82c57f0,
+ 0x0bf4083f,
+ 0xa821f50a,
+ 0x0a0ef403,
+/* 0x0514: dst_tiled */
+ 0x029c21f5,
+/* 0x051b: cmd_exec_kick */
+ 0xf10849f0,
+ 0xb6080057,
+ 0x06980654,
+ 0x4056d01e,
+ 0xf14167f0,
+ 0xfd440063,
+ 0x54d00546,
+ 0x0c3fc800,
+ 0xf5070bf4,
+/* 0x053f: cmd_exec_done */
+ 0xf803eb21,
+/* 0x0541: cmd_wrcache_flush */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00133,
+ 0x0000f800,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include <subdev/vm.h>
+#include <core/ramht.h>
+#include "fuc/nva3.fuc.h"
+
+struct nva3_copy_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nva3_copy_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin;
+ struct nouveau_gpuobj *ctx = NULL;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &ctx);
+ if (ret)
+ return ret;
+
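+ /* channel's PCOPY context slot in instance memory: control word, then context limit and base */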
+ nv_wo32(ramin, 0xc0, 0x00190000);
+ nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
+ nv_wo32(ramin, 0xc8, ctx->vinst);
+ nv_wo32(ramin, 0xcc, 0x00000000);
+ nv_wo32(ramin, 0xd0, 0x00000000);
+ nv_wo32(ramin, 0xd4, 0x00000000);
+ dev_priv->engine.instmem.flush(dev);
+
+ atomic_inc(&chan->vm->engref[engine]);
+ chan->engctx[engine] = ctx;
+ return 0;
+}
+
+static int
+nva3_copy_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct nouveau_gpuobj *ctx = chan->engctx[engine];
+
+ /* fuc engine doesn't need an object, our ramht code does.. */
+ ctx->engine = 3;
+ ctx->class = class;
+ return nouveau_ramht_insert(chan, handle, ctx);
+}
+
+static void
+nva3_copy_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nouveau_gpuobj *ctx = chan->engctx[engine];
+ int i;
+
+ for (i = 0xc0; i <= 0xd4; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ nouveau_gpuobj_ref(NULL, &ctx);
+ chan->engctx[engine] = ctx;
+}
+
+static void
+nva3_copy_tlb_flush(struct drm_device *dev, int engine)
+{
+ nv50_vm_flush_engine(dev, 0x0d);
+}
+
+static int
+nva3_copy_init(struct drm_device *dev, int engine)
+{
+ int i;
+
+ nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
+ nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
+
+ /* upload ucode */
+ nv_wr32(dev, 0x1041c0, 0x01000000);
+ for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
+ nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
+
+ nv_wr32(dev, 0x104180, 0x01000000);
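+ /* code is streamed in through 0x104184 in 256-byte pages; 0x104188 selects the page every 64 words */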
+ for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, 0x104188, i >> 6);
+ nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
+ }
+
+ /* start it running */
+ nv_wr32(dev, 0x10410c, 0x00000000);
+ nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
+ nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
+ return 0;
+}
+
+static int
+nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
+ nv_wr32(dev, 0x104014, 0xffffffff);
+ return 0;
+}
+
+static struct nouveau_enum nva3_copy_isr_error_name[] = {
+ { 0x0001, "ILLEGAL_MTHD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "INVALID_BITFIELD" },
+ {}
+};
+
+static void
+nva3_copy_isr(struct drm_device *dev)
+{
+ u32 dispatch = nv_rd32(dev, 0x10401c);
+ u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
+ u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
+ u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
+ u32 addr = nv_rd32(dev, 0x104040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(dev, 0x104044);
+ int chid = nv50_graph_isr_chid(dev, inst);
+
+ if (stat & 0x00000040) {
+ NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
+ nouveau_enum_print(nva3_copy_isr_error_name, ssta);
+ printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, mthd, data);
+ nv_wr32(dev, 0x104004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
+ nv_wr32(dev, 0x104004, stat);
+ }
+ nv50_fb_vm_trap(dev, 1);
+}
+
+static void
+nva3_copy_destroy(struct drm_device *dev, int engine)
+{
+ struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 22);
+
+ NVOBJ_ENGINE_DEL(dev, COPY0);
+ kfree(pcopy);
+}
+
+int
+nva3_copy_create(struct drm_device *dev)
+{
+ struct nva3_copy_engine *pcopy;
+
+ pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
+ if (!pcopy)
+ return -ENOMEM;
+
+ pcopy->base.destroy = nva3_copy_destroy;
+ pcopy->base.init = nva3_copy_init;
+ pcopy->base.fini = nva3_copy_fini;
+ pcopy->base.context_new = nva3_copy_context_new;
+ pcopy->base.context_del = nva3_copy_context_del;
+ pcopy->base.object_new = nva3_copy_object_new;
+ pcopy->base.tlb_flush = nva3_copy_tlb_flush;
+
+ nouveau_irq_register(dev, 22, nva3_copy_isr);
+
+ NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
+ NVOBJ_CLASS(dev, 0x85b5, COPY0);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include <subdev/vm.h>
+#include <core/ramht.h>
+#include "fuc/nvc0.fuc.h"
+
+struct nvc0_copy_engine {
+ struct nouveau_exec_engine base;
+ u32 irq;
+ u32 pmc;
+ u32 fuc;
+ u32 ctx;
+};
+
+static int
+nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin;
+ struct nouveau_gpuobj *ctx = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 256,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
+ NVOBJ_FLAG_ZERO_ALLOC, &ctx);
+ if (ret)
+ return ret;
+
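+ /* write the 64-bit address of the context object into this engine's slot in the channel's instance memory */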
+ nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
+ nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
+ dev_priv->engine.instmem.flush(dev);
+
+ chan->engctx[engine] = ctx;
+ return 0;
+}
+
+static int
+nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ return 0;
+}
+
+static void
+nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
+ struct nouveau_gpuobj *ctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ u32 inst;
+
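+ /* instance address in 4KiB units, with the falcon's 'channel loaded' bit set for the comparisons below */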
+ inst = (chan->ramin->vinst >> 12);
+ inst |= 0x40000000;
+
+ /* disable fifo access */
+ nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
+ /* mark channel as unloaded if it's currently active */
+ if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
+ nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
+ /* mark next channel as invalid if it's about to be loaded */
+ if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
+ nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
+ /* restore fifo access */
+ nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
+
+ nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
+ nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
+ nouveau_gpuobj_ref(NULL, &ctx);
+
+ chan->engctx[engine] = ctx;
+}
+
+static int
+nvc0_copy_init(struct drm_device *dev, int engine)
+{
+ struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
+ int i;
+
+ nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
+ nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
+ nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
+
+ nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
+ nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
+
+ nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
+ nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
+ }
+
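+ /* engine instance index (0 or 1) for the microcode */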
+ nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
+ nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
+ nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
+ nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
+ return 0;
+}
+
+static int
+nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
+
+ nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
+
+ /* trigger fuc context unload */
+ nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
+ nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
+ nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
+ nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
+
+ nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
+ return 0;
+}
+
+static struct nouveau_enum nvc0_copy_isr_error_name[] = {
+ { 0x0001, "ILLEGAL_MTHD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "INVALID_BITFIELD" },
+ {}
+};
+
+static void
+nvc0_copy_isr(struct drm_device *dev, int engine)
+{
+ struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
+ u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
+ u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
+ u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
+ u32 chid = nvc0_graph_isr_chid(dev, inst);
+ u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
+ u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(dev, pcopy->fuc + 0x044);
+
+ if (stat & 0x00000040) {
+ NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
+ nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
+ printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, mthd, data);
+ nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
+ nv_wr32(dev, pcopy->fuc + 0x004, stat);
+ }
+}
+
+static void
+nvc0_copy_isr_0(struct drm_device *dev)
+{
+ nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
+}
+
+static void
+nvc0_copy_isr_1(struct drm_device *dev)
+{
+ nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
+}
+
+static void
+nvc0_copy_destroy(struct drm_device *dev, int engine)
+{
+ struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, pcopy->irq);
+
+ if (engine == NVOBJ_ENGINE_COPY0)
+ NVOBJ_ENGINE_DEL(dev, COPY0);
+ else
+ NVOBJ_ENGINE_DEL(dev, COPY1);
+ kfree(pcopy);
+}
+
+int
+nvc0_copy_create(struct drm_device *dev, int engine)
+{
+ struct nvc0_copy_engine *pcopy;
+
+ pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
+ if (!pcopy)
+ return -ENOMEM;
+
+ pcopy->base.destroy = nvc0_copy_destroy;
+ pcopy->base.init = nvc0_copy_init;
+ pcopy->base.fini = nvc0_copy_fini;
+ pcopy->base.context_new = nvc0_copy_context_new;
+ pcopy->base.context_del = nvc0_copy_context_del;
+ pcopy->base.object_new = nvc0_copy_object_new;
+
+ if (engine == 0) {
+ pcopy->irq = 5;
+ pcopy->pmc = 0x00000040;
+ pcopy->fuc = 0x104000;
+ pcopy->ctx = 0x0230;
+ nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
+ NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
+ NVOBJ_CLASS(dev, 0x90b5, COPY0);
+ } else {
+ pcopy->irq = 6;
+ pcopy->pmc = 0x00000080;
+ pcopy->fuc = 0x105000;
+ pcopy->ctx = 0x0240;
+ nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
+ NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
+ NVOBJ_CLASS(dev, 0x90b8, COPY1);
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * fuc microcode for nv98 pcrypt engine
+ * Copyright (C) 2010 Marcin Kościelnicki
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+.section #nv98_pcrypt_data
+
+ctx_dma:
+ctx_dma_query: .b32 0
+ctx_dma_src: .b32 0
+ctx_dma_dst: .b32 0
+.equ #dma_count 3
+ctx_query_address_high: .b32 0
+ctx_query_address_low: .b32 0
+ctx_query_counter: .b32 0
+ctx_cond_address_high: .b32 0
+ctx_cond_address_low: .b32 0
+ctx_cond_off: .b32 0
+ctx_src_address_high: .b32 0
+ctx_src_address_low: .b32 0
+ctx_dst_address_high: .b32 0
+ctx_dst_address_low: .b32 0
+ctx_mode: .b32 0
+.align 16
+ctx_key: .skip 16
+ctx_iv: .skip 16
+
+.align 0x80
+swap:
+.skip 32
+
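+// dispatch table format (see the dtable_cmd handler): each entry is two
+// words.  the first holds a handler address or context offset in its low 16
+// bits plus flags in bits 16-17 (bit 17: store the parameter straight into
+// the context, bit 16: skip the command while COND mode disables execution),
+// the second is a mask of parameter bits that must be zero.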
+.align 8
+common_cmd_dtable:
+.b32 #ctx_query_address_high + 0x20000 ~0xff
+.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_query_counter + 0x20000 ~0xffffffff
+.b32 #cmd_query_get + 0x00000 ~1
+.b32 #ctx_cond_address_high + 0x20000 ~0xff
+.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0
+.b32 #cmd_cond_mode + 0x00000 ~7
+.b32 #cmd_wrcache_flush + 0x00000 ~0
+.equ #common_cmd_max 0x88
+
+
+.align 8
+engine_cmd_dtable:
+.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff
+.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff
+.b32 #ctx_src_address_high + 0x20000 ~0xff
+.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
+.b32 #ctx_dst_address_high + 0x20000 ~0xff
+.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
+.b32 #crypt_cmd_mode + 0x00000 ~0xf
+.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
+.equ #engine_cmd_max 0xce
+
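+// one entry per ctx_mode value: a 16-bit pointer to the mode's prep routine
+// followed by a 16-bit pointer to its transfer loop (see crypt_cmd_length).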
+.align 4
+crypt_dtable:
+.b16 #crypt_copy_prep #crypt_do_inout
+.b16 #crypt_store_prep #crypt_do_out
+.b16 #crypt_ecb_e_prep #crypt_do_inout
+.b16 #crypt_ecb_d_prep #crypt_do_inout
+.b16 #crypt_cbc_e_prep #crypt_do_inout
+.b16 #crypt_cbc_d_prep #crypt_do_inout
+.b16 #crypt_pcbc_e_prep #crypt_do_inout
+.b16 #crypt_pcbc_d_prep #crypt_do_inout
+.b16 #crypt_cfb_e_prep #crypt_do_inout
+.b16 #crypt_cfb_d_prep #crypt_do_inout
+.b16 #crypt_ofb_prep #crypt_do_inout
+.b16 #crypt_ctr_prep #crypt_do_inout
+.b16 #crypt_cbc_mac_prep #crypt_do_in
+.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
+.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
+
+.align 0x100
+
+.section #nv98_pcrypt_code
+
+ // $r0 is always set to 0 in our code - this allows some space savings.
+ clear b32 $r0
+
+ // set up the interrupt handler
+ mov $r1 #ih
+ mov $iv0 $r1
+
+ // init stack pointer
+ mov $sp $r0
+
+ // set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host
+ movw $r1 0xfff0
+ sethi $r1 0
+ mov $r2 0x400
+ iowr I[$r2 + 0x300] $r1
+
+ // enable the interrupts
+ or $r1 0xc
+ iowr I[$r2] $r1
+
+ // enable fifo access and context switching
+ mov $r1 3
+ mov $r2 0x1200
+ iowr I[$r2] $r1
+
+ // enable i0 delivery
+ bset $flags ie0
+
+ // sleep forever, waking only for interrupts.
+ bset $flags $p0
+ spin:
+ sleep $p0
+ bra #spin
+
+// i0 handler
+ih:
+ // see which interrupts we got
+ iord $r1 I[$r0 + 0x200]
+
+ and $r2 $r1 0x8
+ cmpu b32 $r2 0
+ bra e #noctx
+
+ // context switch... prepare the regs for xfer
+ mov $r2 0x7700
+ mov $xtargets $r2
+ mov $xdbase $r0
+ // 128-byte context.
+ mov $r2 0
+ sethi $r2 0x50000
+
+ // read current channel
+ mov $r3 0x1400
+ iord $r4 I[$r3]
+ // if bit 30 set, it's active, so we have to unload it first.
+ shl b32 $r5 $r4 1
+ cmps b32 $r5 0
+ bra nc #ctxload
+
+ // unload the current channel - save the context
+ xdst $r0 $r2
+ xdwait
+ // and clear bit 30, then write back
+ bclr $r4 0x1e
+ iowr I[$r3] $r4
+ // tell PFIFO we unloaded
+ mov $r4 1
+ iowr I[$r3 + 0x200] $r4
+
+ bra #noctx
+
+ ctxload:
+ // no channel loaded - perhaps we're requested to load one
+ iord $r4 I[$r3 + 0x100]
+ shl b32 $r15 $r4 1
+ cmps b32 $r15 0
+ // if bit 30 of next channel not set, probably PFIFO is just
+ // killing a context. do a faux load, without the active bit.
+ bra nc #dummyload
+
+ // ok, do a real context load.
+ xdld $r0 $r2
+ xdwait
+ mov $r5 #ctx_dma
+ mov $r6 #dma_count - 1
+ ctxload_dma_loop:
+ ld b32 $r7 D[$r5 + $r6 * 4]
+ add b32 $r8 $r6 0x180
+ shl b32 $r8 8
+ iowr I[$r8] $r7
+ sub b32 $r6 1
+ bra nc #ctxload_dma_loop
+
+ dummyload:
+ // tell PFIFO we're done
+ mov $r5 2
+ iowr I[$r3 + 0x200] $r5
+
+ noctx:
+ and $r2 $r1 0x4
+ cmpu b32 $r2 0
+ bra e #nocmd
+
+ // incoming fifo command.
+ mov $r3 0x1900
+ iord $r2 I[$r3 + 0x100]
+ iord $r3 I[$r3]
+ // extract the method
+ and $r4 $r2 0x7ff
+ // shift the addr to proper position if we need to interrupt later
+ shl b32 $r2 0x10
+
+ // mthd 0 and 0x100 [NAME, NOP]: ignore
+ and $r5 $r4 0x7bf
+ cmpu b32 $r5 0
+ bra e #cmddone
+
+ mov $r5 #engine_cmd_dtable - 0xc0 * 8
+ mov $r6 #engine_cmd_max
+ cmpu b32 $r4 0xc0
+ bra nc #dtable_cmd
+ mov $r5 #common_cmd_dtable - 0x80 * 8
+ mov $r6 #common_cmd_max
+ cmpu b32 $r4 0x80
+ bra nc #dtable_cmd
+ cmpu b32 $r4 0x60
+ bra nc #dma_cmd
+ cmpu b32 $r4 0x50
+ bra ne #illegal_mthd
+
+ // mthd 0x140: PM_TRIGGER
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x20000
+ iowr I[$r2] $r3
+ bra #cmddone
+
+ dma_cmd:
+ // mthd 0x180...: DMA_*
+ cmpu b32 $r4 0x60+#dma_count
+ bra nc #illegal_mthd
+ shl b32 $r5 $r4 2
+ add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
+ bset $r3 0x1e
+ st b32 D[$r5] $r3
+ add b32 $r4 0x180 - 0x60
+ shl b32 $r4 8
+ iowr I[$r4] $r3
+ bra #cmddone
+
+ dtable_cmd:
+ cmpu b32 $r4 $r6
+ bra nc #illegal_mthd
+ shl b32 $r4 3
+ add b32 $r4 $r5
+ ld b32 $r5 D[$r4 + 4]
+ and $r5 $r3
+ cmpu b32 $r5 0
+ bra ne #invalid_bitfield
+ ld b16 $r5 D[$r4]
+ ld b16 $r6 D[$r4 + 2]
+ cmpu b32 $r6 2
+ bra e #cmd_setctx
+ ld b32 $r7 D[$r0 + #ctx_cond_off]
+ and $r6 $r7
+ cmpu b32 $r6 1
+ bra e #cmddone
+ call $r5
+ bra $p1 #dispatch_error
+ bra #cmddone
+
+ cmd_setctx:
+ st b32 D[$r5] $r3
+ bra #cmddone
+
+
+ invalid_bitfield:
+ or $r2 1
+ dispatch_error:
+ illegal_mthd:
+ mov $r4 0x1000
+ iowr I[$r4] $r2
+ iowr I[$r4 + 0x100] $r3
+ mov $r4 0x40
+ iowr I[$r0] $r4
+
+ im_loop:
+ iord $r4 I[$r0 + 0x200]
+ and $r4 0x40
+ cmpu b32 $r4 0
+ bra ne #im_loop
+
+ cmddone:
+ // remove the command from FIFO
+ mov $r3 0x1d00
+ mov $r4 1
+ iowr I[$r3] $r4
+
+ nocmd:
+ // ack the processed interrupts
+ and $r1 $r1 0xc
+ iowr I[$r0 + 0x100] $r1
+iret
+
+cmd_query_get:
+ // if bit 0 of param set, trigger interrupt afterwards.
+ setp $p1 $r3
+ or $r2 3
+
+ // read PTIMER, beware of races...
+ mov $r4 0xb00
+ ptimer_retry:
+ iord $r6 I[$r4 + 0x100]
+ iord $r5 I[$r4]
+ iord $r7 I[$r4 + 0x100]
+ cmpu b32 $r6 $r7
+ bra ne #ptimer_retry
+
+ // prepare the query structure
+ ld b32 $r4 D[$r0 + #ctx_query_counter]
+ st b32 D[$r0 + #swap + 0x0] $r4
+ st b32 D[$r0 + #swap + 0x4] $r0
+ st b32 D[$r0 + #swap + 0x8] $r5
+ st b32 D[$r0 + #swap + 0xc] $r6
+
+ // will use target 0, DMA_QUERY.
+ mov $xtargets $r0
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_high]
+ shl b32 $r4 0x18
+ mov $xdbase $r4
+
+ ld b32 $r4 D[$r0 + #ctx_query_address_low]
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdst $r4 $r5
+ xdwait
+
+ ret
+
+cmd_cond_mode:
+ // if >= 5, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 5
+ bra nc #return
+
+ // otherwise, no error.
+ bclr $flags $p1
+
+ // if < 2, no QUERY object is involved
+ cmpu b32 $r3 2
+ bra nc #cmd_cond_mode_queryful
+
+ xor $r3 1
+ st b32 D[$r0 + #ctx_cond_off] $r3
+ return:
+ ret
+
+ cmd_cond_mode_queryful:
+ // ok, will need to pull a QUERY object, prepare offsets
+ ld b32 $r4 D[$r0 + #ctx_cond_address_high]
+ ld b32 $r5 D[$r0 + #ctx_cond_address_low]
+ and $r6 $r5 0xff
+ shr b32 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r5
+ mov $xdbase $r4
+ mov $xtargets $r0
+
+ // pull the first one
+ mov $r5 #swap
+ sethi $r5 0x20000
+ xdld $r6 $r5
+
+ // if == 2, only a single QUERY is involved...
+ cmpu b32 $r3 2
+ bra ne #cmd_cond_mode_double
+
+ xdwait
+ ld b32 $r4 D[$r0 + #swap + 4]
+ cmpu b32 $r4 0
+ xbit $r4 $flags z
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
+
+ // ok, we'll need to pull second one too
+ cmd_cond_mode_double:
+ add b32 $r6 0x10
+ add b32 $r5 0x10
+ xdld $r6 $r5
+ xdwait
+
+ // compare COUNTERs
+ ld b32 $r5 D[$r0 + #swap + 0x00]
+ ld b32 $r6 D[$r0 + #swap + 0x10]
+ cmpu b32 $r5 $r6
+ xbit $r4 $flags z
+
+ // compare RES values
+ ld b32 $r5 D[$r0 + #swap + 0x04]
+ ld b32 $r6 D[$r0 + #swap + 0x14]
+ cmpu b32 $r5 $r6
+ xbit $r5 $flags z
+ and $r4 $r5
+
+ // and negate or not, depending on mode
+ cmpu b32 $r3 3
+ xbit $r5 $flags z
+ xor $r4 $r5
+ st b32 D[$r0 + #ctx_cond_off] $r4
+ ret
+
+cmd_wrcache_flush:
+ bclr $flags $p1
+ mov $r2 0x2200
+ clear b32 $r3
+ sethi $r3 0x10000
+ iowr I[$r2] $r3
+ ret
+
+crypt_cmd_mode:
+ // if >= 0xf, INVALID_ENUM
+ bset $flags $p1
+ or $r2 2
+ cmpu b32 $r3 0xf
+ bra nc #crypt_cmd_mode_return
+
+ bclr $flags $p1
+ st b32 D[$r0 + #ctx_mode] $r3
+
+ crypt_cmd_mode_return:
+ ret
+
+crypt_cmd_length:
+ // nop if length == 0
+ cmpu b32 $r3 0
+ bra e #crypt_cmd_mode_return
+
+ // init key, IV
+ cxset 3
+ mov $r4 #ctx_key
+ sethi $r4 0x70000
+ xdst $r0 $r4
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdst $r0 $r4
+ xdwait
+ ckeyreg $c7
+
+ // prepare the targets
+ mov $r4 0x2100
+ mov $xtargets $r4
+
+ // prepare src address
+ ld b32 $r4 D[$r0 + #ctx_src_address_high]
+ ld b32 $r5 D[$r0 + #ctx_src_address_low]
+ shr b32 $r8 $r5 8
+ shl b32 $r4 0x18
+ or $r4 $r8
+ and $r5 $r5 0xff
+
+ // prepare dst address
+ ld b32 $r6 D[$r0 + #ctx_dst_address_high]
+ ld b32 $r7 D[$r0 + #ctx_dst_address_low]
+ shr b32 $r8 $r7 8
+ shl b32 $r6 0x18
+ or $r6 $r8
+ and $r7 $r7 0xff
+
+ // find the proper prep & do functions
+ ld b32 $r8 D[$r0 + #ctx_mode]
+ shl b32 $r8 2
+
+ // run prep
+ ld b16 $r9 D[$r8 + #crypt_dtable]
+ call $r9
+
+ // do it
+ ld b16 $r9 D[$r8 + #crypt_dtable + 2]
+ call $r9
+ cxset 1
+ xdwait
+ cxset 0x61
+ xdwait
+ xdwait
+
+ // update src address
+ shr b32 $r8 $r4 0x18
+ shl b32 $r9 $r4 8
+ add b32 $r9 $r5
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_src_address_high] $r8
+ st b32 D[$r0 + #ctx_src_address_low] $r9
+
+ // update dst address
+ shr b32 $r8 $r6 0x18
+ shl b32 $r9 $r6 8
+ add b32 $r9 $r7
+ adc b32 $r8 0
+ st b32 D[$r0 + #ctx_dst_address_high] $r8
+ st b32 D[$r0 + #ctx_dst_address_low] $r9
+
+ // pull updated IV
+ cxset 2
+ mov $r4 #ctx_iv
+ sethi $r4 0x60000
+ xdld $r0 $r4
+ xdwait
+
+ ret
+
+
+crypt_copy_prep:
+ cs0begin 2
+ cxsin $c0
+ cxsout $c0
+ ret
+
+crypt_store_prep:
+ cs0begin 1
+ cxsout $c6
+ ret
+
+crypt_ecb_e_prep:
+ cs0begin 3
+ cxsin $c0
+ cenc $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_ecb_d_prep:
+ ckexp $c7 $c7
+ cs0begin 3
+ cxsin $c0
+ cdec $c0 $c0
+ cxsout $c0
+ ret
+
+crypt_cbc_e_prep:
+ cs0begin 4
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ ret
+
+crypt_cbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cmov $c2 $c6
+ cxsin $c6
+ cdec $c0 $c6
+ cxor $c0 $c2
+ cxsout $c0
+ ret
+
+crypt_pcbc_e_prep:
+ cs0begin 5
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_pcbc_d_prep:
+ ckexp $c7 $c7
+ cs0begin 5
+ cxsin $c0
+ cdec $c1 $c0
+ cxor $c6 $c1
+ cxsout $c6
+ cxor $c6 $c0
+ ret
+
+crypt_cfb_e_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c6 $c0
+ cxsout $c6
+ ret
+
+crypt_cfb_d_prep:
+ cs0begin 4
+ cenc $c0 $c6
+ cxsin $c6
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ofb_prep:
+ cs0begin 4
+ cenc $c6 $c6
+ cxsin $c0
+ cxor $c0 $c6
+ cxsout $c0
+ ret
+
+crypt_ctr_prep:
+ cs0begin 5
+ cenc $c1 $c6
+ cadd $c6 1
+ cxsin $c0
+ cxor $c0 $c1
+ cxsout $c0
+ ret
+
+crypt_cbc_mac_prep:
+ cs0begin 3
+ cxsin $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_complete_prep:
+ cs0begin 7
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+crypt_cmac_finish_partial_prep:
+ cs0begin 8
+ cxsin $c0
+ cxor $c6 $c0
+ cxor $c0 $c0
+ cenc $c0 $c0
+ cprecmac $c0 $c0
+ cprecmac $c0 $c0
+ cxor $c6 $c0
+ cenc $c6 $c6
+ ret
+
+// TODO
+crypt_do_in:
+ add b32 $r3 $r5
+ mov $xdbase $r4
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_in_loop:
+ xdld $r5 $r9
+ xdwait
+ cxset 0x22
+ xdst $r0 $r9
+ cs0exec 1
+ xdwait
+ add b32 $r5 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_in_loop
+ cxset 1
+ xdwait
+ ret
+
+crypt_do_out:
+ add b32 $r3 $r7
+ mov $xdbase $r6
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_out_loop:
+ cs0exec 1
+ cxset 0x61
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r7 0x10
+ cmpu b32 $r7 $r3
+ bra ne #crypt_do_out_loop
+ ret
+
+crypt_do_inout:
+ add b32 $r3 $r5
+ mov $r9 #swap
+ sethi $r9 0x20000
+ crypt_do_inout_loop:
+ mov $xdbase $r4
+ xdld $r5 $r9
+ xdwait
+ cxset 0x21
+ xdst $r0 $r9
+ cs0exec 1
+ cxset 0x61
+ mov $xdbase $r6
+ xdld $r7 $r9
+ xdst $r7 $r9
+ cxset 1
+ xdwait
+ add b32 $r5 0x10
+ add b32 $r7 0x10
+ cmpu b32 $r5 $r3
+ bra ne #crypt_do_inout_loop
+ ret
+
+.align 0x100
--- /dev/null
+uint32_t nv98_pcrypt_data[] = {
+/* 0x0000: ctx_dma */
+/* 0x0000: ctx_dma_query */
+ 0x00000000,
+/* 0x0004: ctx_dma_src */
+ 0x00000000,
+/* 0x0008: ctx_dma_dst */
+ 0x00000000,
+/* 0x000c: ctx_query_address_high */
+ 0x00000000,
+/* 0x0010: ctx_query_address_low */
+ 0x00000000,
+/* 0x0014: ctx_query_counter */
+ 0x00000000,
+/* 0x0018: ctx_cond_address_high */
+ 0x00000000,
+/* 0x001c: ctx_cond_address_low */
+ 0x00000000,
+/* 0x0020: ctx_cond_off */
+ 0x00000000,
+/* 0x0024: ctx_src_address_high */
+ 0x00000000,
+/* 0x0028: ctx_src_address_low */
+ 0x00000000,
+/* 0x002c: ctx_dst_address_high */
+ 0x00000000,
+/* 0x0030: ctx_dst_address_low */
+ 0x00000000,
+/* 0x0034: ctx_mode */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0040: ctx_key */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0050: ctx_iv */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x0080: swap */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+/* 0x00a0: common_cmd_dtable */
+ 0x0002000c,
+ 0xffffff00,
+ 0x00020010,
+ 0x0000000f,
+ 0x00020014,
+ 0x00000000,
+ 0x00000192,
+ 0xfffffffe,
+ 0x00020018,
+ 0xffffff00,
+ 0x0002001c,
+ 0x0000000f,
+ 0x000001d7,
+ 0xfffffff8,
+ 0x00000260,
+ 0xffffffff,
+/* 0x00e0: engine_cmd_dtable */
+ 0x00020040,
+ 0x00000000,
+ 0x00020044,
+ 0x00000000,
+ 0x00020048,
+ 0x00000000,
+ 0x0002004c,
+ 0x00000000,
+ 0x00020050,
+ 0x00000000,
+ 0x00020054,
+ 0x00000000,
+ 0x00020058,
+ 0x00000000,
+ 0x0002005c,
+ 0x00000000,
+ 0x00020024,
+ 0xffffff00,
+ 0x00020028,
+ 0x0000000f,
+ 0x0002002c,
+ 0xffffff00,
+ 0x00020030,
+ 0x0000000f,
+ 0x00000271,
+ 0xfffffff0,
+ 0x00010285,
+ 0xf000000f,
+/* 0x0150: crypt_dtable */
+ 0x04db0321,
+ 0x04b1032f,
+ 0x04db0339,
+ 0x04db034b,
+ 0x04db0361,
+ 0x04db0377,
+ 0x04db0395,
+ 0x04db03af,
+ 0x04db03cd,
+ 0x04db03e3,
+ 0x04db03f9,
+ 0x04db040f,
+ 0x04830429,
+ 0x0483043b,
+ 0x0483045d,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nv98_pcrypt_code[] = {
+ 0x17f004bd,
+ 0x0010fe35,
+ 0xf10004fe,
+ 0xf0fff017,
+ 0x27f10013,
+ 0x21d00400,
+ 0x0c15f0c0,
+ 0xf00021d0,
+ 0x27f10317,
+ 0x21d01200,
+ 0x1031f400,
+/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
+/* 0x0035: ih */
+ 0x8001cffd,
+ 0xb00812c4,
+ 0x0bf40024,
+ 0x0027f167,
+ 0x002bfe77,
+ 0xf00007fe,
+ 0x23f00027,
+ 0x0037f105,
+ 0x0034cf14,
+ 0xb0014594,
+ 0x18f40055,
+ 0x0602fa17,
+ 0x4af003f8,
+ 0x0034d01e,
+ 0xd00147f0,
+ 0x0ef48034,
+/* 0x0075: ctxload */
+ 0x4034cf33,
+ 0xb0014f94,
+ 0x18f400f5,
+ 0x0502fa21,
+ 0x57f003f8,
+ 0x0267f000,
+/* 0x008c: ctxload_dma_loop */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
+/* 0x009f: dummyload */
+ 0xf0f018f4,
+ 0x35d00257,
+/* 0x00a5: noctx */
+ 0x0412c480,
+ 0xf50024b0,
+ 0xf100df0b,
+ 0xcf190037,
+ 0x33cf4032,
+ 0xff24e400,
+ 0x1024b607,
+ 0x07bf45e4,
+ 0xf50054b0,
+ 0xf100b90b,
+ 0xf1fae057,
+ 0xb000ce67,
+ 0x18f4c044,
+ 0xa057f14d,
+ 0x8867f1fc,
+ 0x8044b000,
+ 0xb03f18f4,
+ 0x18f46044,
+ 0x5044b019,
+ 0xf1741bf4,
+ 0xbd220027,
+ 0x0233f034,
+ 0xf50023d0,
+/* 0x0103: dma_cmd */
+ 0xb000810e,
+ 0x18f46344,
+ 0x0245945e,
+ 0xfe8050b7,
+ 0x801e39f0,
+ 0x40b70053,
+ 0x44b60120,
+ 0x0043d008,
+/* 0x0123: dtable_cmd */
+ 0xb8600ef4,
+ 0x18f40446,
+ 0x0344b63e,
+ 0x980045bb,
+ 0x53fd0145,
+ 0x0054b004,
+ 0x58291bf4,
+ 0x46580045,
+ 0x0264b001,
+ 0x98170bf4,
+ 0x67fd0807,
+ 0x0164b004,
+ 0xf9300bf4,
+ 0x0f01f455,
+/* 0x015b: cmd_setctx */
+ 0x80280ef4,
+ 0x0ef40053,
+/* 0x0161: invalid_bitfield */
+ 0x0125f022,
+/* 0x0164: dispatch_error */
+/* 0x0164: illegal_mthd */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x47f04043,
+ 0x0004d040,
+/* 0x0174: im_loop */
+ 0xf08004cf,
+ 0x44b04044,
+ 0xf71bf400,
+/* 0x0180: cmddone */
+ 0x1d0037f1,
+ 0xd00147f0,
+/* 0x018a: nocmd */
+ 0x11c40034,
+ 0x4001d00c,
+/* 0x0192: cmd_query_get */
+ 0x38f201f8,
+ 0x0325f001,
+ 0x0b0047f1,
+/* 0x019c: ptimer_retry */
+ 0xcf4046cf,
+ 0x47cf0045,
+ 0x0467b840,
+ 0x98f41bf4,
+ 0x04800504,
+ 0x21008020,
+ 0x80220580,
+ 0x0bfe2306,
+ 0x03049800,
+ 0xfe1844b6,
+ 0x04980047,
+ 0x8057f104,
+ 0x0253f000,
+ 0xf80645fa,
+/* 0x01d7: cmd_cond_mode */
+ 0xf400f803,
+ 0x25f00131,
+ 0x0534b002,
+ 0xf41218f4,
+ 0x34b00132,
+ 0x0b18f402,
+ 0x800136f0,
+/* 0x01f2: return */
+ 0x00f80803,
+/* 0x01f4: cmd_cond_mode_queryful */
+ 0x98060498,
+ 0x56c40705,
+ 0x0855b6ff,
+ 0xfd1844b6,
+ 0x47fe0545,
+ 0x000bfe00,
+ 0x008057f1,
+ 0xfa0253f0,
+ 0x34b00565,
+ 0x131bf402,
+ 0x049803f8,
+ 0x0044b021,
+ 0x800b4cf0,
+ 0x00f80804,
+/* 0x022c: cmd_cond_mode_double */
+ 0xb61060b6,
+ 0x65fa1050,
+ 0x9803f805,
+ 0x06982005,
+ 0x0456b824,
+ 0x980b4cf0,
+ 0x06982105,
+ 0x0456b825,
+ 0xfd0b5cf0,
+ 0x34b00445,
+ 0x0b5cf003,
+ 0x800645fd,
+ 0x00f80804,
+/* 0x0260: cmd_wrcache_flush */
+ 0xf10132f4,
+ 0xbd220027,
+ 0x0133f034,
+ 0xf80023d0,
+/* 0x0271: crypt_cmd_mode */
+ 0x0131f400,
+ 0xb00225f0,
+ 0x18f40f34,
+ 0x0132f409,
+/* 0x0283: crypt_cmd_mode_return */
+ 0xf80d0380,
+/* 0x0285: crypt_cmd_length */
+ 0x0034b000,
+ 0xf4fb0bf4,
+ 0x47f0033c,
+ 0x0743f040,
+ 0xf00604fa,
+ 0x43f05047,
+ 0x0604fa06,
+ 0x3cf503f8,
+ 0x47f1c407,
+ 0x4bfe2100,
+ 0x09049800,
+ 0x950a0598,
+ 0x44b60858,
+ 0x0548fd18,
+ 0x98ff55c4,
+ 0x07980b06,
+ 0x0878950c,
+ 0xfd1864b6,
+ 0x77c40568,
+ 0x0d0898ff,
+ 0x580284b6,
+ 0x95f9a889,
+ 0xf9a98958,
+ 0x013cf495,
+ 0x3cf403f8,
+ 0xf803f861,
+ 0x18489503,
+ 0xbb084994,
+ 0x81b60095,
+ 0x09088000,
+ 0x950a0980,
+ 0x69941868,
+ 0x0097bb08,
+ 0x800081b6,
+ 0x09800b08,
+ 0x023cf40c,
+ 0xf05047f0,
+ 0x04fa0643,
+ 0xf803f805,
+/* 0x0321: crypt_copy_prep */
+ 0x203cf500,
+ 0x003cf594,
+ 0x003cf588,
+/* 0x032f: crypt_store_prep */
+ 0xf500f88c,
+ 0xf594103c,
+ 0xf88c063c,
+/* 0x0339: crypt_ecb_e_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x003cf588,
+ 0x003cf5d0,
+/* 0x034b: crypt_ecb_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594303c,
+ 0xf588003c,
+ 0xf5d4003c,
+ 0xf88c003c,
+/* 0x0361: crypt_cbc_e_prep */
+ 0x403cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+/* 0x0377: crypt_cbc_d_prep */
+ 0xf500f88c,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf584623c,
+ 0xf588063c,
+ 0xf5d4603c,
+ 0xf5ac203c,
+ 0xf88c003c,
+/* 0x0395: crypt_pcbc_e_prep */
+ 0x503cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+ 0x063cf5d0,
+ 0x063cf58c,
+/* 0x03af: crypt_pcbc_d_prep */
+ 0xf500f8ac,
+ 0xf5c8773c,
+ 0xf594503c,
+ 0xf588003c,
+ 0xf5d4013c,
+ 0xf5ac163c,
+ 0xf58c063c,
+ 0xf8ac063c,
+/* 0x03cd: crypt_cfb_e_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x063cf588,
+ 0x063cf5ac,
+/* 0x03e3: crypt_cfb_d_prep */
+ 0xf500f88c,
+ 0xf594403c,
+ 0xf5d0603c,
+ 0xf588063c,
+ 0xf5ac603c,
+ 0xf88c003c,
+/* 0x03f9: crypt_ofb_prep */
+ 0x403cf500,
+ 0x663cf594,
+ 0x003cf5d0,
+ 0x603cf588,
+ 0x003cf5ac,
+/* 0x040f: crypt_ctr_prep */
+ 0xf500f88c,
+ 0xf594503c,
+ 0xf5d0613c,
+ 0xf5b0163c,
+ 0xf588003c,
+ 0xf5ac103c,
+ 0xf88c003c,
+/* 0x0429: crypt_cbc_mac_prep */
+ 0x303cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x663cf5ac,
+/* 0x043b: crypt_cmac_finish_complete_prep */
+ 0xf500f8d0,
+ 0xf594703c,
+ 0xf588003c,
+ 0xf5ac063c,
+ 0xf5ac003c,
+ 0xf5d0003c,
+ 0xf5bc003c,
+ 0xf5ac063c,
+ 0xf8d0663c,
+/* 0x045d: crypt_cmac_finish_partial_prep */
+ 0x803cf500,
+ 0x003cf594,
+ 0x063cf588,
+ 0x003cf5ac,
+ 0x003cf5ac,
+ 0x003cf5d0,
+ 0x003cf5bc,
+ 0x063cf5bc,
+ 0x663cf5ac,
+/* 0x0483: crypt_do_in */
+ 0xbb00f8d0,
+ 0x47fe0035,
+ 0x8097f100,
+ 0x0293f000,
+/* 0x0490: crypt_do_in_loop */
+ 0xf80559fa,
+ 0x223cf403,
+ 0xf50609fa,
+ 0xf898103c,
+ 0x1050b603,
+ 0xf40453b8,
+ 0x3cf4e91b,
+ 0xf803f801,
+/* 0x04b1: crypt_do_out */
+ 0x0037bb00,
+ 0xf10067fe,
+ 0xf0008097,
+/* 0x04be: crypt_do_out_loop */
+ 0x3cf50293,
+ 0x3cf49810,
+ 0x0579fa61,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb81070b6,
+ 0x1bf40473,
+/* 0x04db: crypt_do_inout */
+ 0xbb00f8e8,
+ 0x97f10035,
+ 0x93f00080,
+/* 0x04e5: crypt_do_inout_loop */
+ 0x0047fe02,
+ 0xf80559fa,
+ 0x213cf403,
+ 0xf50609fa,
+ 0xf498103c,
+ 0x67fe613c,
+ 0x0579fa00,
+ 0xf40679fa,
+ 0x03f8013c,
+ 0xb61050b6,
+ 0x53b81070,
+ 0xd41bf404,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include <subdev/vm.h>
+#include <core/ramht.h>
+
+struct nv84_crypt_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin;
+ struct nouveau_gpuobj *ctx;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &ctx);
+ if (ret)
+ return ret;
+
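+ /* channel's PCRYPT context slot in instance memory: control word, then context limit and base */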
+ nv_wo32(ramin, 0xa0, 0x00190000);
+ nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);
+ nv_wo32(ramin, 0xa8, ctx->vinst);
+ nv_wo32(ramin, 0xac, 0);
+ nv_wo32(ramin, 0xb0, 0);
+ nv_wo32(ramin, 0xb4, 0);
+ dev_priv->engine.instmem.flush(dev);
+
+ atomic_inc(&chan->vm->engref[engine]);
+ chan->engctx[engine] = ctx;
+ return 0;
+}
+
+static void
+nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nouveau_gpuobj *ctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ u32 inst;
+
+ inst = (chan->ramin->vinst >> 12);
+ inst |= 0x80000000;
+
+ /* mark the context as invalid if it is still on the hardware; not
+ * doing this causes issues the next time PCRYPT is used,
+ * unsurprisingly :)
+ */
+ nv_wr32(dev, 0x10200c, 0x00000000);
+ if (nv_rd32(dev, 0x102188) == inst)
+ nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
+ if (nv_rd32(dev, 0x10218c) == inst)
+ nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
+ nv_wr32(dev, 0x10200c, 0x00000010);
+
+ nouveau_gpuobj_ref(NULL, &ctx);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 5;
+ obj->class = class;
+
+ nv_wo32(obj, 0x00, class);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static void
+nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
+{
+ nv50_vm_flush_engine(dev, 0x0a);
+}
+
+static struct nouveau_bitfield nv84_crypt_intr[] = {
+ { 0x00000001, "INVALID_STATE" },
+ { 0x00000002, "ILLEGAL_MTHD" },
+ { 0x00000004, "ILLEGAL_CLASS" },
+ { 0x00000080, "QUERY" },
+ { 0x00000100, "FAULT" },
+ {}
+};
+
+static void
+nv84_crypt_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x102130);
+ u32 mthd = nv_rd32(dev, 0x102190);
+ u32 data = nv_rd32(dev, 0x102194);
+ u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
+ int show = nouveau_ratelimit();
+ int chid = nv50_graph_isr_chid(dev, inst);
+
+ if (show) {
+ NV_INFO(dev, "PCRYPT:");
+ nouveau_bitfield_print(nv84_crypt_intr, stat);
+ printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
+ chid, inst, mthd, data);
+ }
+
+ nv_wr32(dev, 0x102130, stat);
+ nv_wr32(dev, 0x10200c, 0x10);
+
+ nv50_fb_vm_trap(dev, show);
+}
+
+static int
+nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ nv_wr32(dev, 0x102140, 0x00000000);
+ return 0;
+}
+
+static int
+nv84_crypt_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+ nv_wr32(dev, 0x102130, 0xffffffff);
+ nv_wr32(dev, 0x102140, 0xffffffbf);
+
+ nv_wr32(dev, 0x10200c, 0x00000010);
+ return 0;
+}
+
+static void
+nv84_crypt_destroy(struct drm_device *dev, int engine)
+{
+ struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, CRYPT);
+
+ nouveau_irq_unregister(dev, 14);
+ kfree(pcrypt);
+}
+
+int
+nv84_crypt_create(struct drm_device *dev)
+{
+ struct nv84_crypt_engine *pcrypt;
+
+ pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
+ if (!pcrypt)
+ return -ENOMEM;
+
+ pcrypt->base.destroy = nv84_crypt_destroy;
+ pcrypt->base.init = nv84_crypt_init;
+ pcrypt->base.fini = nv84_crypt_fini;
+ pcrypt->base.context_new = nv84_crypt_context_new;
+ pcrypt->base.context_del = nv84_crypt_context_del;
+ pcrypt->base.object_new = nv84_crypt_object_new;
+ pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
+
+ nouveau_irq_register(dev, 14, nv84_crypt_isr);
+
+ NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
+ NVOBJ_CLASS (dev, 0x74c1, CRYPT);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include <subdev/vm.h>
+#include <core/ramht.h>
+
+#include "fuc/nv98.fuc.h"
+
+struct nv98_crypt_priv {
+ struct nouveau_exec_engine base;
+};
+
+struct nv98_crypt_chan {
+ struct nouveau_gpuobj *mem;
+};
+
+static int
+nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv98_crypt_priv *priv = nv_engine(dev, engine);
+ struct nv98_crypt_chan *cctx;
+ int ret;
+
+ cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
+ if (!cctx)
+ return -ENOMEM;
+
+ atomic_inc(&chan->vm->engref[engine]);
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0xa0, 0x00190000);
+ nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
+ nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
+ nv_wo32(chan->ramin, 0xac, 0x00000000);
+ nv_wo32(chan->ramin, 0xb0, 0x00000000);
+ nv_wo32(chan->ramin, 0xb4, 0x00000000);
+ dev_priv->engine.instmem.flush(dev);
+
+error:
+ if (ret)
+ priv->base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv98_crypt_chan *cctx = chan->engctx[engine];
+ int i;
+
+ for (i = 0xa0; i < 0xb4; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+
+ nouveau_gpuobj_ref(NULL, &cctx->mem);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(cctx);
+}
+
+static int
+nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct nv98_crypt_chan *cctx = chan->engctx[engine];
+
+ /* fuc engine doesn't need an object, our ramht code does.. */
+ cctx->mem->engine = 5;
+ cctx->mem->class = class;
+ return nouveau_ramht_insert(chan, handle, cctx->mem);
+}
+
+static void
+nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
+{
+ nv50_vm_flush_engine(dev, 0x0a);
+}
+
+static int
+nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ return 0;
+}
+
+static int
+nv98_crypt_init(struct drm_device *dev, int engine)
+{
+ int i;
+
+ /* reset! */
+ nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
+
+ /* wait for exit interrupt to signal */
+ nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
+ nv_wr32(dev, 0x087004, 0x00000010);
+
+ /* upload microcode code and data segments */
+ nv_wr32(dev, 0x087ff8, 0x00100000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
+ nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
+
+ nv_wr32(dev, 0x087ff8, 0x00000000);
+ for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
+ nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
+
+ /* start it running */
+ nv_wr32(dev, 0x08710c, 0x00000000);
+ nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
+ nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
+ return 0;
+}
+
+static struct nouveau_enum nv98_crypt_isr_error_name[] = {
+ { 0x0000, "ILLEGAL_MTHD" },
+ { 0x0001, "INVALID_BITFIELD" },
+ { 0x0002, "INVALID_ENUM" },
+ { 0x0003, "QUERY" },
+ {}
+};
+
+static void
+nv98_crypt_isr(struct drm_device *dev)
+{
+ u32 disp = nv_rd32(dev, 0x08701c);
+ u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
+ u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
+ u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
+ u32 addr = nv_rd32(dev, 0x087040) >> 16;
+ u32 mthd = (addr & 0x07ff) << 2;
+ u32 subc = (addr & 0x3800) >> 11;
+ u32 data = nv_rd32(dev, 0x087044);
+ int chid = nv50_graph_isr_chid(dev, inst);
+
+ if (stat & 0x00000040) {
+ NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
+ nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
+ printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, mthd, data);
+ nv_wr32(dev, 0x087004, 0x00000040);
+ stat &= ~0x00000040;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
+ nv_wr32(dev, 0x087004, stat);
+ }
+
+ nv50_fb_vm_trap(dev, 1);
+}
+
+static void
+nv98_crypt_destroy(struct drm_device *dev, int engine)
+{
+ struct nv98_crypt_priv *priv = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 14);
+ NVOBJ_ENGINE_DEL(dev, CRYPT);
+ kfree(priv);
+}
+
+int
+nv98_crypt_create(struct drm_device *dev)
+{
+ struct nv98_crypt_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.destroy = nv98_crypt_destroy;
+ priv->base.init = nv98_crypt_init;
+ priv->base.fini = nv98_crypt_fini;
+ priv->base.context_new = nv98_crypt_context_new;
+ priv->base.context_del = nv98_crypt_context_del;
+ priv->base.object_new = nv98_crypt_object_new;
+ priv->base.tlb_flush = nv98_crypt_tlb_flush;
+
+ nouveau_irq_register(dev, 14, nv98_crypt_isr);
+
+ NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
+ NVOBJ_CLASS(dev, 0x88b4, CRYPT);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include "nouveau_util.h"
+#include <core/ramht.h>
+#include "nouveau_software.h"
+
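+/* describes how one PFIFO register field is saved to, and restored from, a
+ * channel's RAMFC entry: field width in bits, bit shift and byte offset on
+ * the RAMFC side, bit shift on the register side, and the register address.
+ */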
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv04_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+struct nv04_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv04_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+bool
+nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
+{
+ int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
+
+ if (!enable) {
+ /* In some cases the PFIFO puller may be left in an
+ * inconsistent state if you try to stop it when it's
+ * busy translating handles. Sometimes you get a
+ * PFIFO_CACHE_ERROR, sometimes it just fails silently
+ * sending incorrect instance offsets to PGRAPH after
+ * it's started up again. To avoid the latter we
+ * invalidate the most recently calculated instance.
+ */
+ if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
+ NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
+ NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
+
+ if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
+ NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+ }
+
+ return pull & 1;
+}
+
+static int
+nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv04_fifo_chan *fctx;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 32, ~0, 32,
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x14, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x18, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
+
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+void
+nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv04_fifo_chan *fctx = chan->engctx[engine];
+ struct ramfc_desc *c = priv->ramfc_desc;
+ unsigned long flags;
+ int chid;
+
+ /* prevent fifo context switches */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ /* if this channel is active, replace it with a null context */
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
+ if (chid == chan->id) {
+ nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
+
+ do {
+ u32 mask = ((1ULL << c->bits) - 1) << c->regs;
+ nv_mask(dev, c->regp, mask, 0x00000000);
+ } while ((++c)->bits);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ /* restore normal operation, after disabling dma mode */
+ nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* clean up */
+ nouveau_gpuobj_ref(NULL, &fctx->ramfc);
+ nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+}
+
+int
+nv04_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
+}
+
+int
+nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+ struct nouveau_channel *chan;
+ int chid;
+
+ /* prevent context switches and halt fifo operation */
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
+
+ /* store current fifo context in ramfc */
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
+ chan = dev_priv->channels.ptr[chid];
+ if (suspend && chid != priv->base.channels && chan) {
+ struct nv04_fifo_chan *fctx = chan->engctx[engine];
+ struct nouveau_gpuobj *ctx = fctx->ramfc;
+ struct ramfc_desc *c = priv->ramfc_desc;
+ do {
+ u32 rm = ((1ULL << c->bits) - 1) << c->regs;
+ u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
+ u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
+ u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
+ nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
+ } while ((++c)->bits);
+ }
+
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
+ return 0;
+}
+
+static bool
+nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ struct nouveau_gpuobj *obj;
+ unsigned long flags;
+ const int subc = (addr >> 13) & 0x7;
+ const int mthd = addr & 0x1ffc;
+ bool handled = false;
+ u32 engine;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (likely(chid >= 0 && chid < pfifo->channels))
+ chan = dev_priv->channels.ptr[chid];
+ if (unlikely(!chan))
+ goto out;
+
+ switch (mthd) {
+ case 0x0000: /* bind object to subchannel */
+ obj = nouveau_ramht_find(chan, data);
+ if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
+ break;
+
+ engine = 0x0000000f << (subc * 4);
+
+ nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
+ handled = true;
+ break;
+ default:
+ engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
+ if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
+ break;
+
+ if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
+ mthd, data))
+ handled = true;
+ break;
+ }
+
+out:
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return handled;
+}
+
+static const char *nv_dma_state_err(u32 state)
+{
+ static const char * const desc[] = {
+ "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
+ "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
+ };
+ return desc[(state >> 29) & 0x7];
+}
+
+void
+nv04_fifo_isr(struct drm_device *dev)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t status, reassign;
+ int cnt = 0;
+
+ reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
+ while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
+ uint32_t chid, get;
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, 0);
+
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
+ get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
+
+ if (status & NV_PFIFO_INTR_CACHE_ERROR) {
+ uint32_t mthd, data;
+ int ptr;
+
+ /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
+ * wrapping on my G80 chips, but CACHE1 isn't big
+ * enough for this much data.. Tests show that it
+ * wraps around to the start at GET=0x800.. No clue
+ * as to why..
+ */
+ ptr = (get & 0x7ff) >> 2;
+
+ if (dev_priv->card_type < NV_40) {
+ mthd = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV04_PFIFO_CACHE1_DATA(ptr));
+ } else {
+ mthd = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_METHOD(ptr));
+ data = nv_rd32(dev,
+ NV40_PFIFO_CACHE1_DATA(ptr));
+ }
+
+ if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
+ NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
+ "Mthd 0x%04x Data 0x%08x\n",
+ chid, (mthd >> 13) & 7, mthd & 0x1ffc,
+ data);
+ }
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_CACHE_ERROR);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
+ nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+
+ nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
+ nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+
+ status &= ~NV_PFIFO_INTR_CACHE_ERROR;
+ }
+
+ if (status & NV_PFIFO_INTR_DMA_PUSHER) {
+ u32 dma_get = nv_rd32(dev, 0x003244);
+ u32 dma_put = nv_rd32(dev, 0x003240);
+ u32 push = nv_rd32(dev, 0x003220);
+ u32 state = nv_rd32(dev, 0x003228);
+
+ if (dev_priv->card_type == NV_50) {
+ u32 ho_get = nv_rd32(dev, 0x003328);
+ u32 ho_put = nv_rd32(dev, 0x003320);
+ u32 ib_get = nv_rd32(dev, 0x003334);
+ u32 ib_put = nv_rd32(dev, 0x003330);
+
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+ "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+ "State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, ho_get, dma_get, ho_put,
+ dma_put, ib_get, ib_put, state,
+ nv_dma_state_err(state),
+ push);
+
+ /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+ nv_wr32(dev, 0x003364, 0x00000000);
+ if (dma_get != dma_put || ho_get != ho_put) {
+ nv_wr32(dev, 0x003244, dma_put);
+ nv_wr32(dev, 0x003328, ho_put);
+ } else
+ if (ib_get != ib_put) {
+ nv_wr32(dev, 0x003334, ib_put);
+ }
+ } else {
+ NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+ "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
+ chid, dma_get, dma_put, state,
+ nv_dma_state_err(state), push);
+
+ if (dma_get != dma_put)
+ nv_wr32(dev, 0x003244, dma_put);
+ }
+
+ nv_wr32(dev, 0x003228, 0x00000000);
+ nv_wr32(dev, 0x003220, 0x00000001);
+ nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+ status &= ~NV_PFIFO_INTR_DMA_PUSHER;
+ }
+
+ if (status & NV_PFIFO_INTR_SEMAPHORE) {
+ uint32_t sem;
+
+ status &= ~NV_PFIFO_INTR_SEMAPHORE;
+ nv_wr32(dev, NV03_PFIFO_INTR_0,
+ NV_PFIFO_INTR_SEMAPHORE);
+
+ sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
+ nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ }
+
+ if (dev_priv->card_type == NV_50) {
+ if (status & 0x00000010) {
+ nv50_fb_vm_trap(dev, nouveau_ratelimit());
+ status &= ~0x00000010;
+ nv_wr32(dev, 0x002100, 0x00000010);
+ }
+ }
+
+ if (status) {
+ if (nouveau_ratelimit())
+ NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
+ status, chid);
+ nv_wr32(dev, NV03_PFIFO_INTR_0, status);
+ status = 0;
+ }
+
+ nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
+ }
+
+ if (status) {
+ NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
+ nv_wr32(dev, 0x2140, 0);
+ nv_wr32(dev, 0x140, 0);
+ }
+
+ nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
+}
+
+void
+nv04_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 8);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv04_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv04_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv04_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv04_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 15;
+ priv->ramfc_desc = nv04_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include "nouveau_util.h"
+#include <core/ramht.h>
+
+static struct ramfc_desc {
+ unsigned bits:6;  /* width of the field, in bits */
+ unsigned ctxs:5;  /* bit shift of the field within the RAMFC word */
+ unsigned ctxp:8;  /* byte offset of the word within RAMFC */
+ unsigned regs:5;  /* bit shift of the field within the PFIFO register */
+ unsigned regp;    /* PFIFO register address */
+} nv10_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ {}
+};
+
+struct nv10_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv10_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv10_fifo_chan *fctx;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 32, ~0, 32,
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x08, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x10, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x18, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
+
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+int
+nv10_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv04_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv10_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv10_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include "nouveau_util.h"
+#include <core/ramht.h>
+
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv17_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ {}
+};
+
+struct nv17_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv17_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv17_fifo_chan *fctx;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 64, ~0, 64,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv17_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
+ nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+ nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
+ dev_priv->ramfc->pinst >> 8);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
+}
+
+int
+nv17_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv17_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv17_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv17_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv17_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include "nouveau_util.h"
+#include <core/ramht.h>
+
+static struct ramfc_desc {
+ unsigned bits:6;
+ unsigned ctxs:5;
+ unsigned ctxp:8;
+ unsigned regs:5;
+ unsigned regp;
+} nv40_ramfc[] = {
+ { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
+ { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
+ { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
+ { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
+ { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
+ { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
+ { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
+ { 2, 28, 0x18, 28, 0x002058 },
+ { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
+ { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
+ { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
+ { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
+ { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
+ { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
+ { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
+ { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
+ { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
+ { 32, 0, 0x40, 0, 0x0032e4 },
+ { 32, 0, 0x44, 0, 0x0032e8 },
+ { 32, 0, 0x4c, 0, 0x002088 },
+ { 32, 0, 0x50, 0, 0x003300 },
+ { 32, 0, 0x54, 0, 0x00330c },
+ {}
+};
+
+struct nv40_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct ramfc_desc *ramfc_desc;
+};
+
+struct nv40_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+};
+
+static int
+nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv40_fifo_priv *priv = nv_engine(dev, engine);
+ struct nv40_fifo_chan *fctx;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ /* map channel control registers */
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV03_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* initialise default fifo context */
+ ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
+ chan->id * 128, ~0, 128,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
+ nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
+ nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
+ NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
+ NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
+#ifdef __BIG_ENDIAN
+ NV_PFIFO_CACHE1_BIG_ENDIAN |
+#endif
+ NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
+ nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
+
+ /* enable dma mode on the channel */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /*XXX: remove this later, need fifo engine context commit hook */
+ nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static int
+nv40_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv40_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
+ nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
+
+ nv_wr32(dev, 0x002040, 0x000000ff);
+ nv_wr32(dev, 0x002044, 0x2101ffff);
+ nv_wr32(dev, 0x002058, 0x00000001);
+
+ nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
+ ((dev_priv->ramht->bits - 9) << 16) |
+ (dev_priv->ramht->gpuobj->pinst >> 8));
+ nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
+
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ nv_wr32(dev, 0x002230, 0x00000001);
+ case 0x40:
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x48:
+ nv_wr32(dev, 0x002220, 0x00030002);
+ break;
+ default:
+ nv_wr32(dev, 0x002230, 0x00000000);
+ nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
+ dev_priv->ramfc->pinst) >> 16) |
+ 0x00030000);
+ break;
+ }
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
+
+ nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
+ nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
+
+ nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
+ nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
+ nv_wr32(dev, NV03_PFIFO_CACHES, 1);
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (dev_priv->channels.ptr[i])
+ nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
+ }
+
+ return 0;
+}
+
+int
+nv40_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv40_fifo_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv04_fifo_destroy;
+ priv->base.base.init = nv40_fifo_init;
+ priv->base.base.fini = nv04_fifo_fini;
+ priv->base.base.context_new = nv40_fifo_context_new;
+ priv->base.base.context_del = nv04_fifo_context_del;
+ priv->base.channels = 31;
+ priv->ramfc_desc = nv40_ramfc;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
+#include <subdev/vm.h>
+
+struct nv50_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv50_fifo_chan {
+ struct nouveau_fifo_chan base;
+};
+
+void
+nv50_fifo_playlist_update(struct drm_device *dev)
+{
+ struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *cur;
+ int i, p;
+
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
+
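+ /* write the ids of all channels still marked active (0x2600 bit 31)
+ * into the new playlist buffer
+ */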
+ for (i = 0, p = 0; i < priv->base.channels; i++) {
+ if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
+ nv_wo32(cur, p++ * 4, i);
+ }
+
+ dev_priv->engine.instmem.flush(dev);
+
+ nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
+ nv_wr32(dev, 0x0032ec, p);
+ nv_wr32(dev, 0x002500, 0x00000101);
+}
+
+static int
+nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv50_fifo_chan *fctx;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+ u64 instance = chan->ramin->vinst >> 12;
+ unsigned long flags;
+ int ret = 0, i;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ atomic_inc(&chan->vm->engref[engine]);
+
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
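+ /* zero the first 0x100 bytes of the instance block, then fill in the
+ * default fifo context
+ */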
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x3c, 0x403f6078);
+ nv_wo32(chan->ramin, 0x40, 0x00000000);
+ nv_wo32(chan->ramin, 0x44, 0x01003fff);
+ nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(chan->ramin, 0x60, 0x7fffffff);
+ nv_wo32(chan->ramin, 0x78, 0x00000000);
+ nv_wo32(chan->ramin, 0x7c, 0x30000001);
+ nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
+
+ dev_priv->engine.instmem.flush(dev);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
+ nv50_fifo_playlist_update(dev);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static bool
+nv50_fifo_kickoff(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ bool done = true;
+ u32 me;
+
+ /* HW bug workaround:
+ *
+ * PFIFO will hang forever if the connected engines don't report
+ * that they've processed the context switch request.
+ *
+ * In order for the kickoff to work, we need to ensure all the
+ * connected engines are in a state where they can answer.
+ *
+ * Newer chipsets don't seem to suffer from this issue, and well,
+ * there's also an "ignore these engines" bitmask reg we can use
+ * if we hit the issue there..
+ */
+
+ /* PME: make sure engine is enabled */
+ me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
+
+ /* do the kickoff... */
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+ done = false;
+ }
+
+ /* restore any engine states we changed, and exit */
+ nv_wr32(dev, 0x00b860, me);
+ return done;
+}
+
+static void
+nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv50_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ /* remove channel from playlist, will context switch if active */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(dev);
+
+ /* tell any engines on this channel to unload their contexts */
+ nv50_fifo_kickoff(chan);
+
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* clean up */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv50_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 instance;
+ int i;
+
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x00250c, 0x6f3cfc34);
+ nv_wr32(dev, 0x002044, 0x01003fff);
+
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+
+ for (i = 0; i < 128; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && chan->engctx[engine])
+ instance = 0x80000000 | chan->ramin->vinst >> 12;
+ else
+ instance = 0x00000000;
+ nv_wr32(dev, 0x002600 + (i * 4), instance);
+ }
+
+ nv50_fifo_playlist_update(dev);
+
+ nv_wr32(dev, 0x003200, 1);
+ nv_wr32(dev, 0x003250, 1);
+ nv_wr32(dev, 0x002500, 1);
+ return 0;
+}
+
+static int
+nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ /* set playlist length to zero, fifo will unload context */
+ nv_wr32(dev, 0x0032ec, 0);
+
+ /* tell all connected engines to unload their contexts */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && !nv50_fifo_kickoff(chan))
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x002140, 0);
+ return 0;
+}
+
+void
+nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
+{
+ nv50_vm_flush_engine(dev, 5);
+}
+
+void
+nv50_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 8);
+
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nv50_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv50_fifo_destroy;
+ priv->base.base.init = nv50_fifo_init;
+ priv->base.base.fini = nv50_fifo_fini;
+ priv->base.base.context_new = nv50_fifo_context_new;
+ priv->base.base.context_del = nv50_fifo_context_del;
+ priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
+ priv->base.channels = 127;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
+#include <subdev/vm.h>
+
+struct nv84_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nv84_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *ramfc;
+ struct nouveau_gpuobj *cache;
+};
+
+static int
+nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
+ struct nv84_fifo_chan *fctx;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
+ u64 instance;
+ unsigned long flags;
+ int ret;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+ atomic_inc(&chan->vm->engref[engine]);
+
+ chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
+ NV50_USER(chan->id), PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
+ if (ret)
+ goto error;
+
+ instance = fctx->ramfc->vinst >> 8;
+
+ ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
+ if (ret)
+ goto error;
+
+ nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
+ nv_wo32(fctx->ramfc, 0x40, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
+ nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
+ nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
+ nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
+ drm_order(chan->dma.ib_max + 1) << 16);
+ nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
+ nv_wo32(fctx->ramfc, 0x78, 0x00000000);
+ nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
+ nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
+ (4 << 24) /* SEARCH_FULL */ |
+ (chan->ramht->gpuobj->cinst >> 4));
+ nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
+ nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
+
+ nv_wo32(chan->ramin, 0x00, chan->id);
+ nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
+
+ dev_priv->engine.instmem.flush(dev);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
+ nv50_fifo_playlist_update(dev);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv84_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+ u32 save;
+
+ /* remove channel from playlist, will context switch if active */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
+ nv50_fifo_playlist_update(dev);
+
+ save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
+ /* tell any engines on this channel to unload their contexts */
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
+
+ nv_wr32(dev, 0x002520, save);
+
+ nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* clean up */
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ nouveau_gpuobj_ref(NULL, &fctx->ramfc);
+ nouveau_gpuobj_ref(NULL, &fctx->cache);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nv84_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_chan *fctx;
+ u32 instance;
+ int i;
+
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x00250c, 0x6f3cfc34);
+ nv_wr32(dev, 0x002044, 0x01003fff);
+
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xffffffff);
+
+ for (i = 0; i < 128; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan && (fctx = chan->engctx[engine]))
+ instance = 0x80000000 | fctx->ramfc->vinst >> 8;
+ else
+ instance = 0x00000000;
+ nv_wr32(dev, 0x002600 + (i * 4), instance);
+ }
+
+ nv50_fifo_playlist_update(dev);
+
+ nv_wr32(dev, 0x003200, 1);
+ nv_wr32(dev, 0x003250, 1);
+ nv_wr32(dev, 0x002500, 1);
+ return 0;
+}
+
+static int
+nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+ u32 save;
+
+ /* set playlist length to zero, fifo will unload context */
+ nv_wr32(dev, 0x0032ec, 0);
+
+ save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
+
+ /* tell all connected engines to unload their contexts */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (chan)
+ nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
+ if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
+ NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002520, save);
+ nv_wr32(dev, 0x002140, 0);
+ return 0;
+}
+
+int
+nv84_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv84_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nv50_fifo_destroy;
+ priv->base.base.init = nv84_fifo_init;
+ priv->base.base.fini = nv84_fifo_fini;
+ priv->base.base.context_new = nv84_fifo_context_new;
+ priv->base.base.context_del = nv84_fifo_context_del;
+ priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
+ priv->base.channels = 127;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nv04_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include <core/mm.h>
+#include <engine/fifo.h>
+
+static void nvc0_fifo_isr(struct drm_device *);
+
+struct nvc0_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+ struct nouveau_vma user_vma;
+ int spoon_nr;
+};
+
+struct nvc0_fifo_chan {
+ struct nouveau_fifo_chan base;
+ struct nouveau_gpuobj *user;
+};
+
+static void
+nvc0_fifo_playlist_update(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct nouveau_gpuobj *cur;
+ int i, p;
+
+ cur = priv->playlist[priv->cur_playlist];
+ priv->cur_playlist = !priv->cur_playlist;
+
+ for (i = 0, p = 0; i < 128; i++) {
+ if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000004);
+ p += 8;
+ }
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x002270, cur->vinst >> 12);
+ nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
+ if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
+ NV_ERROR(dev, "PFIFO - playlist update failed\n");
+}
+
+static int
+nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nvc0_fifo_chan *fctx;
+ u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+ int ret, i;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+ priv->user_vma.offset + (chan->id * 0x1000),
+ PAGE_SIZE);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /* allocate vram for control regs, map into polling area */
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
+ if (ret)
+ goto error;
+
+ nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
+ *(struct nouveau_mem **)fctx->user->node);
+
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
+ nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
+ nv_wo32(chan->ramin, 0x10, 0x0000face);
+ nv_wo32(chan->ramin, 0x30, 0xfffff902);
+ nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ upper_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x54, 0x00000002);
+ nv_wo32(chan->ramin, 0x84, 0x20400000);
+ nv_wo32(chan->ramin, 0x94, 0x30000001);
+ nv_wo32(chan->ramin, 0x9c, 0x00000100);
+ nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
+ nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
+ nv_wo32(chan->ramin, 0xac, 0x0000001f);
+ nv_wo32(chan->ramin, 0xb8, 0xf8000000);
+ nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
+ (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
+ nvc0_fifo_playlist_update(dev);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+
+ nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x002634, chan->id);
+ if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
+ NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+ nvc0_fifo_playlist_update(dev);
+ nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
+
+ nouveau_gpuobj_ref(NULL, &fctx->user);
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ chan->engctx[engine] = NULL;
+ kfree(fctx);
+}
+
+static int
+nvc0_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nouveau_channel *chan;
+ int i;
+
+ /* reset PFIFO, enable all available PSUBFIFO areas */
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x000204, 0xffffffff);
+ nv_wr32(dev, 0x002204, 0xffffffff);
+
+ priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
+ NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+ /* assign engines to subfifos */
+ if (priv->spoon_nr >= 3) {
+ nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
+ nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
+ nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
+ nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
+ nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
+ nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
+ }
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+ }
+
+ nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
+
+ nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xbfffffff);
+
+ /* restore PFIFO context table */
+ for (i = 0; i < 128; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->engctx[engine])
+ continue;
+
+ nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
+ (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
+ }
+ nvc0_fifo_playlist_update(dev);
+
+ return 0;
+}
+
+static int
+nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ int i;
+
+ for (i = 0; i < 128; i++) {
+ if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
+ continue;
+
+ nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x002634, i);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+ NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+ i, nv_rd32(dev, 0x002634));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0x00000000);
+ return 0;
+}
+
+struct nouveau_enum nvc0_fifo_fault_unit[] = {
+ { 0x00, "PGRAPH" },
+ { 0x03, "PEEPHOLE" },
+ { 0x04, "BAR1" },
+ { 0x05, "BAR3" },
+ { 0x07, "PFIFO" },
+ { 0x10, "PBSP" },
+ { 0x11, "PPPP" },
+ { 0x13, "PCOUNTER" },
+ { 0x14, "PVP" },
+ { 0x15, "PCOPY0" },
+ { 0x16, "PCOPY1" },
+ { 0x17, "PDAEMON" },
+ {}
+};
+
+struct nouveau_enum nvc0_fifo_fault_reason[] = {
+ { 0x00, "PT_NOT_PRESENT" },
+ { 0x01, "PT_TOO_SHORT" },
+ { 0x02, "PAGE_NOT_PRESENT" },
+ { 0x03, "VM_LIMIT_EXCEEDED" },
+ { 0x04, "NO_CHANNEL" },
+ { 0x05, "PAGE_SYSTEM_ONLY" },
+ { 0x06, "PAGE_READ_ONLY" },
+ { 0x0a, "COMPRESSED_SYSRAM" },
+ { 0x0c, "INVALID_STORAGE_TYPE" },
+ {}
+};
+
+struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
+ { 0x01, "PCOPY0" },
+ { 0x02, "PCOPY1" },
+ { 0x04, "DISPATCH" },
+ { 0x05, "CTXCTL" },
+ { 0x06, "PFIFO" },
+ { 0x07, "BAR_READ" },
+ { 0x08, "BAR_WRITE" },
+ { 0x0b, "PVP" },
+ { 0x0c, "PPPP" },
+ { 0x0d, "PBSP" },
+ { 0x11, "PCOUNTER" },
+ { 0x12, "PDAEMON" },
+ { 0x14, "CCACHE" },
+ { 0x15, "CCACHE_POST" },
+ {}
+};
+
+struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
+ { 0x01, "TEX" },
+ { 0x0c, "ESETUP" },
+ { 0x0e, "CTXCTL" },
+ { 0x0f, "PROP" },
+ {}
+};
+
+struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
+/* { 0x00008000, "" } seen with null ib push */
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
+static void
+nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+ u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+ u32 client = (stat & 0x00001f00) >> 8;
+
+ NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+ (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nvc0_fifo_fault_unit, unit);
+ if (stat & 0x00000040) {
+ printk("/");
+ nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
+ } else {
+ printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
+ }
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static int
+nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
+{
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (likely(chid >= 0 && chid < priv->base.channels)) {
+ chan = dev_priv->channels.ptr[chid];
+ if (likely(chan))
+ ret = nouveau_finish_page_flip(chan, NULL);
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return ret;
+}
+
+static void
+nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+ u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+ u32 show = stat;
+
+ if (stat & 0x00200000) {
+ if (mthd == 0x0054) {
+ if (!nvc0_fifo_page_flip(dev, chid))
+ show &= ~0x00200000;
+ }
+ }
+
+ if (show) {
+ NV_INFO(dev, "PFIFO%d:", unit);
+ nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
+ NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+ }
+
+ nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nvc0_fifo_isr(struct drm_device *dev)
+{
+ u32 mask = nv_rd32(dev, 0x002140);
+ u32 stat = nv_rd32(dev, 0x002100) & mask;
+
+ if (stat & 0x00000100) {
+ NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+ nv_wr32(dev, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(dev, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_vm_fault(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(dev, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nvc0_fifo_isr_subfifo_intr(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+ nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+ nv_wr32(dev, 0x002100, stat);
+ nv_wr32(dev, 0x002140, 0);
+ }
+}
+
+static void
+nvc0_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_vm_put(&priv->user_vma);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
+ nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nvc0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nvc0_fifo_destroy;
+ priv->base.base.init = nvc0_fifo_init;
+ priv->base.base.fini = nvc0_fifo_fini;
+ priv->base.base.context_new = nvc0_fifo_context_new;
+ priv->base.base.context_del = nvc0_fifo_context_del;
+ priv->base.channels = 128;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
+ 12, NV_MEM_ACCESS_RW, &priv->user_vma);
+ if (ret)
+ goto error;
+
+ nouveau_irq_register(dev, 8, nvc0_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include <core/mm.h>
+#include <engine/fifo.h>
+
+#define NVE0_FIFO_ENGINE_NUM 32
+
+static void nve0_fifo_isr(struct drm_device *);
+
+struct nve0_fifo_engine {
+ struct nouveau_gpuobj *playlist[2];
+ int cur_playlist;
+};
+
+struct nve0_fifo_priv {
+ struct nouveau_fifo_priv base;
+ struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
+ struct {
+ struct nouveau_gpuobj *mem;
+ struct nouveau_vma bar;
+ } user;
+ int spoon_nr;
+};
+
+struct nve0_fifo_chan {
+ struct nouveau_fifo_chan base;
+ u32 engine;
+};
+
+static void
+nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct nve0_fifo_engine *peng = &priv->engine[engine];
+ struct nouveau_gpuobj *cur;
+ u32 match = (engine << 16) | 0x00000001;
+ int ret, i, p;
+
+ cur = peng->playlist[peng->cur_playlist];
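+ /* allocate this engine's playlist buffer on first use */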
+ if (unlikely(cur == NULL)) {
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
+ if (ret) {
+ NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
+ return;
+ }
+
+ peng->playlist[peng->cur_playlist] = cur;
+ }
+
+ peng->cur_playlist = !peng->cur_playlist;
+
+ for (i = 0, p = 0; i < priv->base.channels; i++) {
+ u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
+ if (ctrl != match)
+ continue;
+ nv_wo32(cur, p + 0, i);
+ nv_wo32(cur, p + 4, 0x00000000);
+ p += 8;
+ }
+ pinstmem->flush(dev);
+
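+ /* point PFIFO at the new playlist and wait for the update to be accepted */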
+ nv_wr32(dev, 0x002270, cur->vinst >> 12);
+ nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
+ if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+ NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
+}
+
+static int
+nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nve0_fifo_chan *fctx;
+ u64 usermem = priv->user.mem->vinst + chan->id * 512;
+ u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
+ int ret = 0, i;
+
+ fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
+ if (!fctx)
+ return -ENOMEM;
+
+ fctx->engine = 0; /* PGRAPH */
+
+ /* map this channel's slice of the fifo control area (through BAR1) for polling */
+ chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
+ priv->user.bar.offset + (chan->id * 512), 512);
+ if (!chan->user) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
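+ /* initialise the channel's instance block: user control-page address,
+ * pushbuf IB base/size, and assorted fixed magic values */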
+ for (i = 0; i < 0x100; i += 4)
+ nv_wo32(chan->ramin, i, 0x00000000);
+ nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
+ nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
+ nv_wo32(chan->ramin, 0x10, 0x0000face);
+ nv_wo32(chan->ramin, 0x30, 0xfffff902);
+ nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
+ upper_32_bits(ib_virt));
+ nv_wo32(chan->ramin, 0x84, 0x20400000);
+ nv_wo32(chan->ramin, 0x94, 0x30000001);
+ nv_wo32(chan->ramin, 0x9c, 0x00000100);
+ nv_wo32(chan->ramin, 0xac, 0x0000001f);
+ nv_wo32(chan->ramin, 0xe4, 0x00000000);
+ nv_wo32(chan->ramin, 0xe8, chan->id);
+ nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
+ nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
+ pinstmem->flush(dev);
+
+ nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
+ (chan->ramin->vinst >> 12));
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
+
+error:
+ if (ret)
+ priv->base.base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nve0_fifo_chan *fctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+
+ nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
+ nv_wr32(dev, 0x002634, chan->id);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, chan->id))
+ NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
+
+ if (chan->user) {
+ iounmap(chan->user);
+ chan->user = NULL;
+ }
+
+ chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
+ kfree(fctx);
+}
+
+static int
+nve0_fifo_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ struct nve0_fifo_chan *fctx;
+ int i;
+
+ /* reset PFIFO, enable all available PSUBFIFO areas */
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x000204, 0xffffffff);
+
+ priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
+ NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
+
+ /* PSUBFIFO[n] */
+ for (i = 0; i < priv->spoon_nr; i++) {
+ nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
+ nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
+ nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
+ }
+
+ nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
+
+ nv_wr32(dev, 0x002a00, 0xffffffff);
+ nv_wr32(dev, 0x002100, 0xffffffff);
+ nv_wr32(dev, 0x002140, 0xbfffffff);
+
+ /* restore PFIFO context table */
+ for (i = 0; i < priv->base.channels; i++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[i];
+ if (!chan || !(fctx = chan->engctx[engine]))
+ continue;
+
+ nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
+ (chan->ramin->vinst >> 12));
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+ nve0_fifo_playlist_update(dev, fctx->engine);
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
+ }
+
+ return 0;
+}
+
+static int
+nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ for (i = 0; i < priv->base.channels; i++) {
+ if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
+ continue;
+
+ nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
+ nv_wr32(dev, 0x002634, i);
+ if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
+ NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
+ i, nv_rd32(dev, 0x002634));
+ return -EBUSY;
+ }
+ }
+
+ nv_wr32(dev, 0x002140, 0x00000000);
+ return 0;
+}
+
+struct nouveau_enum nve0_fifo_fault_unit[] = {
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_reason[] = {
+ { 0x00, "PT_NOT_PRESENT" },
+ { 0x01, "PT_TOO_SHORT" },
+ { 0x02, "PAGE_NOT_PRESENT" },
+ { 0x03, "VM_LIMIT_EXCEEDED" },
+ { 0x04, "NO_CHANNEL" },
+ { 0x05, "PAGE_SYSTEM_ONLY" },
+ { 0x06, "PAGE_READ_ONLY" },
+ { 0x0a, "COMPRESSED_SYSRAM" },
+ { 0x0c, "INVALID_STORAGE_TYPE" },
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_hubclient[] = {
+ {}
+};
+
+struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
+ {}
+};
+
+struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
+ { 0x00200000, "ILLEGAL_MTHD" },
+ { 0x00800000, "EMPTY_SUBC" },
+ {}
+};
+
+static void
+nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
+{
+ u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
+ u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
+ u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
+ u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
+ u32 client = (stat & 0x00001f00) >> 8;
+
+ NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
+ (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
+ nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
+ printk("] from ");
+ nouveau_enum_print(nve0_fifo_fault_unit, unit);
+ if (stat & 0x00000040) {
+ printk("/");
+ nouveau_enum_print(nve0_fifo_fault_hubclient, client);
+ } else {
+ printk("/GPC%d/", (stat & 0x1f000000) >> 24);
+ nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
+ }
+ printk(" on channel 0x%010llx\n", (u64)inst << 12);
+}
+
+static int
+nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
+{
+ struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ if (likely(chid >= 0 && chid < priv->base.channels)) {
+ chan = dev_priv->channels.ptr[chid];
+ if (likely(chan))
+ ret = nouveau_finish_page_flip(chan, NULL);
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return ret;
+}
+
+static void
+nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
+{
+ u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
+ u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
+ u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
+ u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00003ffc);
+ u32 show = stat;
+
+ if (stat & 0x00200000) {
+ if (mthd == 0x0054) {
+ if (!nve0_fifo_page_flip(dev, chid))
+ show &= ~0x00200000;
+ }
+ }
+
+ if (show) {
+ NV_INFO(dev, "PFIFO%d:", unit);
+ nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
+ NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
+ unit, chid, subc, mthd, data);
+ }
+
+ nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
+ nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
+}
+
+static void
+nve0_fifo_isr(struct drm_device *dev)
+{
+ u32 mask = nv_rd32(dev, 0x002140);
+ u32 stat = nv_rd32(dev, 0x002100) & mask;
+
+ if (stat & 0x00000100) {
+ NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
+ nv_wr32(dev, 0x002100, 0x00000100);
+ stat &= ~0x00000100;
+ }
+
+ if (stat & 0x10000000) {
+ u32 units = nv_rd32(dev, 0x00259c);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_vm_fault(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x00259c, units);
+ stat &= ~0x10000000;
+ }
+
+ if (stat & 0x20000000) {
+ u32 units = nv_rd32(dev, 0x0025a0);
+ u32 u = units;
+
+ while (u) {
+ int i = ffs(u) - 1;
+ nve0_fifo_isr_subfifo_intr(dev, i);
+ u &= ~(1 << i);
+ }
+
+ nv_wr32(dev, 0x0025a0, units);
+ stat &= ~0x20000000;
+ }
+
+ if (stat & 0x40000000) {
+ NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
+ nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
+ stat &= ~0x40000000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
+ nv_wr32(dev, 0x002100, stat);
+ nv_wr32(dev, 0x002140, 0);
+ }
+}
+
+static void
+nve0_fifo_destroy(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv = nv_engine(dev, engine);
+ int i;
+
+ nouveau_vm_put(&priv->user.bar);
+ nouveau_gpuobj_ref(NULL, &priv->user.mem);
+
+ for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
+ nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
+ }
+
+ dev_priv->eng[engine] = NULL;
+ kfree(priv);
+}
+
+int
+nve0_fifo_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_fifo_priv *priv;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.base.destroy = nve0_fifo_destroy;
+ priv->base.base.init = nve0_fifo_init;
+ priv->base.base.fini = nve0_fifo_fini;
+ priv->base.base.context_new = nve0_fifo_context_new;
+ priv->base.base.context_del = nve0_fifo_context_del;
+ priv->base.channels = 4096;
+ dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
+
+ ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
+ 12, NV_MEM_ACCESS_RW, &priv->user.bar);
+ if (ret)
+ goto error;
+
+ nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
+
+ nouveau_irq_register(dev, 8, nve0_fifo_isr);
+error:
+ if (ret)
+ priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
+ return ret;
+}
--- /dev/null
+#ifndef __NOUVEAU_GRCTX_H__
+#define __NOUVEAU_GRCTX_H__
+
+struct nouveau_grctx {
+ struct drm_device *dev;
+
+ enum {
+ NOUVEAU_GRCTX_PROG,
+ NOUVEAU_GRCTX_VALS
+ } mode;
+ void *data;
+
+ uint32_t ctxprog_max;
+ uint32_t ctxprog_len;
+ uint32_t ctxprog_reg;
+ int ctxprog_label[32];
+ uint32_t ctxvals_pos;
+ uint32_t ctxvals_base;
+};
+
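+/* append a single instruction to the ctxprog being built (PROG mode only) */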
+static inline void
+cp_out(struct nouveau_grctx *ctx, uint32_t inst)
+{
+ uint32_t *ctxprog = ctx->data;
+
+ if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ return;
+
+ BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
+ ctxprog[ctx->ctxprog_len++] = inst;
+}
+
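+/* load the ctxprog scratch register (SR) with an immediate value */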
+static inline void
+cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
+{
+ cp_out(ctx, CP_LOAD_SR | val);
+}
+
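+/* declare <length> consecutive PGRAPH MMIO registers starting at <reg> to be
+ * saved/restored, reserving matching space in the ctxvals area */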
+static inline void
+cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
+{
+ ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
+
+ ctx->ctxvals_base = ctx->ctxvals_pos;
+ ctx->ctxvals_pos = ctx->ctxvals_base + length;
+
+ if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
+ cp_lsr(ctx, length);
+ length = 0;
+ }
+
+ cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
+}
+
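+/* define a branch target at the current position and back-patch any earlier
+ * forward branches that referenced it */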
+static inline void
+cp_name(struct nouveau_grctx *ctx, int name)
+{
+ uint32_t *ctxprog = ctx->data;
+ int i;
+
+ if (ctx->mode != NOUVEAU_GRCTX_PROG)
+ return;
+
+ ctx->ctxprog_label[name] = ctx->ctxprog_len;
+ for (i = 0; i < ctx->ctxprog_len; i++) {
+ if ((ctxprog[i] & 0xfff00000) != 0xff400000)
+ continue;
+ if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
+ continue;
+ ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
+ (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
+ }
+}
+
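+/* emit a branch (mod=0), call (mod=1) or return (mod=2) conditional on a
+ * flag; forward references are resolved later by cp_name() */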
+static inline void
+_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
+{
+ int ip = 0;
+
+ if (mod != 2) {
+ ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
+ if (ip == 0)
+ ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
+ }
+
+ cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
+ (state ? 0 : CP_BRA_IF_CLEAR));
+}
+#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
+#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
+
+static inline void
+_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
+{
+ cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
+}
+#define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+_cp_set(struct nouveau_grctx *ctx, int flag, int state)
+{
+ cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
+}
+#define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
+
+static inline void
+cp_pos(struct nouveau_grctx *ctx, int offset)
+{
+ ctx->ctxvals_pos = offset;
+ ctx->ctxvals_base = ctx->ctxvals_pos;
+
+ cp_lsr(ctx, ctx->ctxvals_pos);
+ cp_out(ctx, CP_SET_CONTEXT_POINTER);
+}
+
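+/* set the default ctxvals value for a register previously declared with
+ * cp_ctx() (VALS mode only) */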
+static inline void
+gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
+{
+ if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ return;
+
+ reg = (reg - 0x00400000) / 4;
+ reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
+
+ nv_wo32(ctx->data, reg * 4, val);
+}
+
+#endif
--- /dev/null
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* NVIDIA context programs handle a number of other conditions which are
+ * not implemented in our versions. It's not clear why NVIDIA context
+ * programs have this code, nor whether it's strictly necessary for
+ * correct operation. We'll implement additional handling if/when we
+ * discover it's necessary.
+ *
+ * - On context save, NVIDIA sets 0x400314 bit 0 to 1 if the "3D state"
+ * flag is set; this gets saved into the context.
+ * - On context save, the context program for all cards loads nsource
+ * into a flag register and checks for ILLEGAL_MTHD. If it's set,
+ * opcode 0x60000d is called before resuming normal operation.
+ * - Some context programs check more conditions than the above. NV44
+ * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
+ * and calls 0x60000d before resuming normal operation.
+ * - At the very beginning of NVIDIA's context programs, flag 9 is checked
+ * and, if true, 0x800001 is called with count=0, pos=0, the flag is cleared,
+ * and then the ctxprog is aborted. It looks like a complicated NOP;
+ * its purpose is unknown.
+ * - In the section of code that loads the per-vs state, NVIDIA checks
+ * flag 10. If it's set, they only transfer the small 0x300 byte block
+ * of state + the state for a single vs as opposed to the state for
+ * all vs units. It doesn't seem likely that it'll occur in normal
+ * operation, especially seeing as it appears NVIDIA may have screwed
+ * up the ctxprogs for some cards and have an invalid instruction
+ * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
+ * - There are a number of places where context offset 0 (where we place
+ * the PRAMIN offset of the context) is loaded into either 0x408000,
+ * 0x408004 or 0x408008. Not sure what's up there either.
+ * - The ctxprogs for some cards save 0x400a00 again during the cleanup
+ * path for auto-loadctx.
+ */
+
+#define CP_FLAG_CLEAR 0
+#define CP_FLAG_SET 1
+#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD 0
+#define CP_FLAG_SWAP_DIRECTION_SAVE 1
+#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING 1
+#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING 1
+#define CP_FLAG_STATUS ((3 * 32) + 0)
+#define CP_FLAG_STATUS_IDLE 0
+#define CP_FLAG_STATUS_BUSY 1
+#define CP_FLAG_AUTO_SAVE ((3 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING 1
+#define CP_FLAG_AUTO_LOAD ((3 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING 1
+#define CP_FLAG_UNK54 ((3 * 32) + 6)
+#define CP_FLAG_UNK54_CLEAR 0
+#define CP_FLAG_UNK54_SET 1
+#define CP_FLAG_ALWAYS ((3 * 32) + 8)
+#define CP_FLAG_ALWAYS_FALSE 0
+#define CP_FLAG_ALWAYS_TRUE 1
+#define CP_FLAG_UNK57 ((3 * 32) + 9)
+#define CP_FLAG_UNK57_CLEAR 0
+#define CP_FLAG_UNK57_SET 1
+
+#define CP_CTX 0x00100000
+#define CP_CTX_COUNT 0x000fc000
+#define CP_CTX_COUNT_SHIFT 14
+#define CP_CTX_REG 0x00003fff
+#define CP_LOAD_SR 0x00200000
+#define CP_LOAD_SR_VALUE 0x000fffff
+#define CP_BRA 0x00400000
+#define CP_BRA_IP 0x0000ff00
+#define CP_BRA_IP_SHIFT 8
+#define CP_BRA_IF_CLEAR 0x00000080
+#define CP_BRA_FLAG 0x0000007f
+#define CP_WAIT 0x00500000
+#define CP_WAIT_SET 0x00000080
+#define CP_WAIT_FLAG 0x0000007f
+#define CP_SET 0x00700000
+#define CP_SET_1 0x00000080
+#define CP_SET_FLAG 0x0000007f
+#define CP_NEXT_TO_SWAP 0x00600007
+#define CP_NEXT_TO_CURRENT 0x00600009
+#define CP_SET_CONTEXT_POINTER 0x0060000a
+#define CP_END 0x0060000e
+#define CP_LOAD_MAGIC_UNK01 0x00800001 /* unknown */
+#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
+#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "ctx.h"
+
+/* TODO:
+ * - get vs count from 0x1540
+ */
+
+static int
+nv40_graph_vs_count(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ return 8;
+ case 0x40:
+ return 6;
+ case 0x41:
+ case 0x42:
+ return 5;
+ case 0x43:
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ return 3;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ default:
+ return 1;
+ }
+}
+
+
+enum cp_label {
+ cp_check_load = 1,
+ cp_setup_auto_load,
+ cp_setup_load,
+ cp_setup_save,
+ cp_swap_state,
+ cp_swap_state3d_3_is_save,
+ cp_prepare_exit,
+ cp_exit,
+};
+
+static void
+nv40_graph_construct_general(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ cp_ctx(ctx, 0x4000a4, 1);
+ gr_def(ctx, 0x4000a4, 0x00000008);
+ cp_ctx(ctx, 0x400144, 58);
+ gr_def(ctx, 0x400144, 0x00000001);
+ cp_ctx(ctx, 0x400314, 1);
+ gr_def(ctx, 0x400314, 0x00000000);
+ cp_ctx(ctx, 0x400400, 10);
+ cp_ctx(ctx, 0x400480, 10);
+ cp_ctx(ctx, 0x400500, 19);
+ gr_def(ctx, 0x400514, 0x00040000);
+ gr_def(ctx, 0x400524, 0x55555555);
+ gr_def(ctx, 0x400528, 0x55555555);
+ gr_def(ctx, 0x40052c, 0x55555555);
+ gr_def(ctx, 0x400530, 0x55555555);
+ cp_ctx(ctx, 0x400560, 6);
+ gr_def(ctx, 0x400568, 0x0000ffff);
+ gr_def(ctx, 0x40056c, 0x0000ffff);
+ cp_ctx(ctx, 0x40057c, 5);
+ cp_ctx(ctx, 0x400710, 3);
+ gr_def(ctx, 0x400710, 0x20010001);
+ gr_def(ctx, 0x400714, 0x0f73ef00);
+ cp_ctx(ctx, 0x400724, 1);
+ gr_def(ctx, 0x400724, 0x02008821);
+ cp_ctx(ctx, 0x400770, 3);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x400814, 4);
+ cp_ctx(ctx, 0x400828, 5);
+ cp_ctx(ctx, 0x400840, 5);
+ gr_def(ctx, 0x400850, 0x00000040);
+ cp_ctx(ctx, 0x400858, 4);
+ gr_def(ctx, 0x400858, 0x00000040);
+ gr_def(ctx, 0x40085c, 0x00000040);
+ gr_def(ctx, 0x400864, 0x80000000);
+ cp_ctx(ctx, 0x40086c, 9);
+ gr_def(ctx, 0x40086c, 0x80000000);
+ gr_def(ctx, 0x400870, 0x80000000);
+ gr_def(ctx, 0x400874, 0x80000000);
+ gr_def(ctx, 0x400878, 0x80000000);
+ gr_def(ctx, 0x400888, 0x00000040);
+ gr_def(ctx, 0x40088c, 0x80000000);
+ cp_ctx(ctx, 0x4009c0, 8);
+ gr_def(ctx, 0x4009cc, 0x80000000);
+ gr_def(ctx, 0x4009dc, 0x80000000);
+ } else {
+ cp_ctx(ctx, 0x400840, 20);
+ if (nv44_graph_class(ctx->dev)) {
+ for (i = 0; i < 8; i++)
+ gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
+ }
+ gr_def(ctx, 0x400880, 0x00000040);
+ gr_def(ctx, 0x400884, 0x00000040);
+ gr_def(ctx, 0x400888, 0x00000040);
+ cp_ctx(ctx, 0x400894, 11);
+ gr_def(ctx, 0x400894, 0x00000040);
+ if (!nv44_graph_class(ctx->dev)) {
+ for (i = 0; i < 8; i++)
+ gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
+ }
+ cp_ctx(ctx, 0x4008e0, 2);
+ cp_ctx(ctx, 0x4008f8, 2);
+ if (dev_priv->chipset == 0x4c ||
+ (dev_priv->chipset & 0xf0) == 0x60)
+ cp_ctx(ctx, 0x4009f8, 1);
+ }
+ cp_ctx(ctx, 0x400a00, 73);
+ gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
+ cp_ctx(ctx, 0x401000, 4);
+ cp_ctx(ctx, 0x405004, 1);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x403448, 1);
+ gr_def(ctx, 0x403448, 0x00001010);
+ break;
+ default:
+ cp_ctx(ctx, 0x403440, 1);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x403440, 0x00000010);
+ break;
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ gr_def(ctx, 0x403440, 0x00003010);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ default:
+ gr_def(ctx, 0x403440, 0x00001010);
+ break;
+ }
+ break;
+ }
+}
+
+static void
+nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x401880, 51);
+ gr_def(ctx, 0x401940, 0x00000100);
+ } else
+ if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
+ dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+ cp_ctx(ctx, 0x401880, 32);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
+ if (dev_priv->chipset == 0x46)
+ cp_ctx(ctx, 0x401900, 16);
+ cp_ctx(ctx, 0x401940, 3);
+ }
+ cp_ctx(ctx, 0x40194c, 18);
+ gr_def(ctx, 0x401954, 0x00000111);
+ gr_def(ctx, 0x401958, 0x00080060);
+ gr_def(ctx, 0x401974, 0x00000080);
+ gr_def(ctx, 0x401978, 0xffff0000);
+ gr_def(ctx, 0x40197c, 0x00000001);
+ gr_def(ctx, 0x401990, 0x46400000);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x4019a0, 2);
+ cp_ctx(ctx, 0x4019ac, 5);
+ } else {
+ cp_ctx(ctx, 0x4019a0, 1);
+ cp_ctx(ctx, 0x4019b4, 3);
+ }
+ gr_def(ctx, 0x4019bc, 0xffff0000);
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x4019c0, 18);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
+ break;
+ }
+ cp_ctx(ctx, 0x401a08, 8);
+ gr_def(ctx, 0x401a10, 0x0fff0000);
+ gr_def(ctx, 0x401a14, 0x0fff0000);
+ gr_def(ctx, 0x401a1c, 0x00011100);
+ cp_ctx(ctx, 0x401a2c, 4);
+ cp_ctx(ctx, 0x401a44, 26);
+ for (i = 0; i < 16; i++)
+ gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
+ gr_def(ctx, 0x401a8c, 0x4b7fffff);
+ if (dev_priv->chipset == 0x40) {
+ cp_ctx(ctx, 0x401ab8, 3);
+ } else {
+ cp_ctx(ctx, 0x401ab8, 1);
+ cp_ctx(ctx, 0x401ac0, 1);
+ }
+ cp_ctx(ctx, 0x401ad0, 8);
+ gr_def(ctx, 0x401ad0, 0x30201000);
+ gr_def(ctx, 0x401ad4, 0x70605040);
+ gr_def(ctx, 0x401ad8, 0xb8a89888);
+ gr_def(ctx, 0x401adc, 0xf8e8d8c8);
+ cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
+ gr_def(ctx, 0x401b10, 0x40100000);
+ cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
+ gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
+ 0x00000004 : 0x00000000);
+ cp_ctx(ctx, 0x401b30, 25);
+ gr_def(ctx, 0x401b34, 0x0000ffff);
+ gr_def(ctx, 0x401b68, 0x435185d6);
+ gr_def(ctx, 0x401b6c, 0x2155b699);
+ gr_def(ctx, 0x401b70, 0xfedcba98);
+ gr_def(ctx, 0x401b74, 0x00000098);
+ gr_def(ctx, 0x401b84, 0xffffffff);
+ gr_def(ctx, 0x401b88, 0x00ff7000);
+ gr_def(ctx, 0x401b8c, 0x0000ffff);
+ if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
+ dev_priv->chipset != 0x4e)
+ cp_ctx(ctx, 0x401b94, 1);
+ cp_ctx(ctx, 0x401b98, 8);
+ gr_def(ctx, 0x401b9c, 0x00ff0000);
+ cp_ctx(ctx, 0x401bc0, 9);
+ gr_def(ctx, 0x401be0, 0x00ffff00);
+ cp_ctx(ctx, 0x401c00, 192);
+ for (i = 0; i < 16; i++) { /* fragment texture units */
+ gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
+ gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
+ gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
+ gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
+ gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
+ gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
+ }
+ for (i = 0; i < 4; i++) { /* vertex texture units */
+ gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
+ gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
+ gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
+ gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
+ }
+ cp_ctx(ctx, 0x400f5c, 3);
+ gr_def(ctx, 0x400f5c, 0x00000002);
+ cp_ctx(ctx, 0x400f84, 1);
+}
+
+static void
+nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+
+ cp_ctx(ctx, 0x402000, 1);
+ cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x402404, 0x00000001);
+ break;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ gr_def(ctx, 0x402404, 0x00000020);
+ break;
+ case 0x46:
+ case 0x49:
+ case 0x4b:
+ gr_def(ctx, 0x402404, 0x00000421);
+ break;
+ default:
+ gr_def(ctx, 0x402404, 0x00000021);
+ }
+ if (dev_priv->chipset != 0x40)
+ gr_def(ctx, 0x402408, 0x030c30c3);
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x46:
+ case 0x4a:
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ cp_ctx(ctx, 0x402440, 1);
+ gr_def(ctx, 0x402440, 0x00011001);
+ break;
+ default:
+ break;
+ }
+ cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
+ gr_def(ctx, 0x402488, 0x3e020200);
+ gr_def(ctx, 0x40248c, 0x00ffffff);
+ switch (dev_priv->chipset) {
+ case 0x40:
+ gr_def(ctx, 0x402490, 0x60103f00);
+ break;
+ case 0x47:
+ gr_def(ctx, 0x402490, 0x40103f00);
+ break;
+ case 0x41:
+ case 0x42:
+ case 0x49:
+ case 0x4b:
+ gr_def(ctx, 0x402490, 0x20103f00);
+ break;
+ default:
+ gr_def(ctx, 0x402490, 0x0c103f00);
+ break;
+ }
+ gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
+ 0x00020000 : 0x00040000);
+ cp_ctx(ctx, 0x402500, 31);
+ gr_def(ctx, 0x402530, 0x00008100);
+ if (dev_priv->chipset == 0x40)
+ cp_ctx(ctx, 0x40257c, 6);
+ cp_ctx(ctx, 0x402594, 16);
+ cp_ctx(ctx, 0x402800, 17);
+ gr_def(ctx, 0x402800, 0x00000001);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x402864, 1);
+ gr_def(ctx, 0x402864, 0x00001001);
+ cp_ctx(ctx, 0x402870, 3);
+ gr_def(ctx, 0x402878, 0x00000003);
+ if (dev_priv->chipset != 0x47) { /* belong at end!! */
+ cp_ctx(ctx, 0x402900, 1);
+ cp_ctx(ctx, 0x402940, 1);
+ cp_ctx(ctx, 0x402980, 1);
+ cp_ctx(ctx, 0x4029c0, 1);
+ cp_ctx(ctx, 0x402a00, 1);
+ cp_ctx(ctx, 0x402a40, 1);
+ cp_ctx(ctx, 0x402a80, 1);
+ cp_ctx(ctx, 0x402ac0, 1);
+ }
+ break;
+ case 0x40:
+ cp_ctx(ctx, 0x402844, 1);
+ gr_def(ctx, 0x402844, 0x00000001);
+ cp_ctx(ctx, 0x402850, 1);
+ break;
+ default:
+ cp_ctx(ctx, 0x402844, 1);
+ gr_def(ctx, 0x402844, 0x00001001);
+ cp_ctx(ctx, 0x402850, 2);
+ gr_def(ctx, 0x402854, 0x00000003);
+ break;
+ }
+
+ cp_ctx(ctx, 0x402c00, 4);
+ gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
+ 0x80800001 : 0x00888001);
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x402c20, 40);
+ for (i = 0; i < 32; i++)
+ gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
+ cp_ctx(ctx, 0x4030b8, 13);
+ gr_def(ctx, 0x4030dc, 0x00000005);
+ gr_def(ctx, 0x4030e8, 0x0000ffff);
+ break;
+ default:
+ cp_ctx(ctx, 0x402c10, 4);
+ if (dev_priv->chipset == 0x40)
+ cp_ctx(ctx, 0x402c20, 36);
+ else
+ if (dev_priv->chipset <= 0x42)
+ cp_ctx(ctx, 0x402c20, 24);
+ else
+ if (dev_priv->chipset <= 0x4a)
+ cp_ctx(ctx, 0x402c20, 16);
+ else
+ cp_ctx(ctx, 0x402c20, 8);
+ cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
+ gr_def(ctx, 0x402cd4, 0x00000005);
+ if (dev_priv->chipset != 0x40)
+ gr_def(ctx, 0x402ce0, 0x0000ffff);
+ break;
+ }
+
+ cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
+ cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
+ for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
+ gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
+
+ if (dev_priv->chipset != 0x40) {
+ cp_ctx(ctx, 0x403600, 1);
+ gr_def(ctx, 0x403600, 0x00000001);
+ }
+ cp_ctx(ctx, 0x403800, 1);
+
+ cp_ctx(ctx, 0x403c18, 1);
+ gr_def(ctx, 0x403c18, 0x00000001);
+ switch (dev_priv->chipset) {
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ cp_ctx(ctx, 0x405018, 1);
+ gr_def(ctx, 0x405018, 0x08e00001);
+ cp_ctx(ctx, 0x405c24, 1);
+ gr_def(ctx, 0x405c24, 0x000e3000);
+ break;
+ }
+ if (dev_priv->chipset != 0x4e)
+ cp_ctx(ctx, 0x405800, 11);
+ cp_ctx(ctx, 0x407000, 1);
+}
+
+static void
+nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
+{
+ int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
+
+ cp_out (ctx, 0x300000);
+ cp_lsr (ctx, len - 4);
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
+ cp_lsr (ctx, len);
+ cp_name(ctx, cp_swap_state3d_3_is_save);
+ cp_out (ctx, 0x800001);
+
+ ctx->ctxvals_pos += len;
+}
+
+static void
+nv40_graph_construct_shader(struct nouveau_grctx *ctx)
+{
+ struct drm_device *dev = ctx->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = ctx->data;
+ int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
+ int offset, i;
+
+ vs_nr = nv40_graph_vs_count(ctx->dev);
+ vs_nr_b0 = 363;
+ vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
+ if (dev_priv->chipset == 0x40) {
+ b0_offset = 0x2200/4; /* 33a0 */
+ b1_offset = 0x55a0/4; /* 1500 */
+ vs_len = 0x6aa0/4;
+ } else
+ if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
+ b0_offset = 0x2200/4; /* 2200 */
+ b1_offset = 0x4400/4; /* 0b00 */
+ vs_len = 0x4f00/4;
+ } else {
+ b0_offset = 0x1d40/4; /* 2200 */
+ b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
+ vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
+ }
+
+ cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
+ cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
+
+ offset = ctx->ctxvals_pos;
+ ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
+
+ if (ctx->mode != NOUVEAU_GRCTX_VALS)
+ return;
+
+ offset += 0x0280/4;
+ for (i = 0; i < 16; i++, offset += 2)
+ nv_wo32(obj, offset * 4, 0x3f800000);
+
+ for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
+ for (i = 0; i < vs_nr_b0 * 6; i += 6)
+ nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
+ for (i = 0; i < vs_nr_b1 * 4; i += 4)
+ nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
+ }
+}
+
+static void
+nv40_grctx_generate(struct nouveau_grctx *ctx)
+{
+ /* decide whether we're loading/unloading the context */
+ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+ cp_name(ctx, cp_check_load);
+ cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+ cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+ cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+
+ /* setup for context load */
+ cp_name(ctx, cp_setup_auto_load);
+ cp_wait(ctx, STATUS, IDLE);
+ cp_out (ctx, CP_NEXT_TO_SWAP);
+ cp_name(ctx, cp_setup_load);
+ cp_wait(ctx, STATUS, IDLE);
+ cp_set (ctx, SWAP_DIRECTION, LOAD);
+ cp_out (ctx, 0x00910880); /* ?? */
+ cp_out (ctx, 0x00901ffe); /* ?? */
+ cp_out (ctx, 0x01940000); /* ?? */
+ cp_lsr (ctx, 0x20);
+ cp_out (ctx, 0x0060000b); /* ?? */
+ cp_wait(ctx, UNK57, CLEAR);
+ cp_out (ctx, 0x0060000c); /* ?? */
+ cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+ /* setup for context save */
+ cp_name(ctx, cp_setup_save);
+ cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+ /* general PGRAPH state */
+ cp_name(ctx, cp_swap_state);
+ cp_pos (ctx, 0x00020/4);
+ nv40_graph_construct_general(ctx);
+ cp_wait(ctx, STATUS, IDLE);
+
+ /* 3D state, block 1 */
+ cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
+ nv40_graph_construct_state3d(ctx);
+ cp_wait(ctx, STATUS, IDLE);
+
+ /* 3D state, block 2 */
+ nv40_graph_construct_state3d_2(ctx);
+
+ /* Some other block of "random" state */
+ nv40_graph_construct_state3d_3(ctx);
+
+ /* Per-vertex shader state */
+ cp_pos (ctx, ctx->ctxvals_pos);
+ nv40_graph_construct_shader(ctx);
+
+ /* pre-exit state updates */
+ cp_name(ctx, cp_prepare_exit);
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+ cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+ cp_name(ctx, cp_exit);
+ cp_set (ctx, USER_SAVE, NOT_PENDING);
+ cp_set (ctx, USER_LOAD, NOT_PENDING);
+ cp_out (ctx, CP_END);
+}
+
+void
+nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+ nv40_grctx_generate(&(struct nouveau_grctx) {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_VALS,
+ .data = mem,
+ });
+}
+
+void
+nv40_grctx_init(struct drm_device *dev, u32 *size)
+{
+ u32 ctxprog[256], i;
+ struct nouveau_grctx ctx = {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_PROG,
+ .data = ctxprog,
+ .ctxprog_max = ARRAY_SIZE(ctxprog)
+ };
+
+ nv40_grctx_generate(&ctx);
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
+ *size = ctx.ctxvals_pos * 4;
+}
--- /dev/null
+/*
+ * Copyright 2009 Marcin Kościelnicki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define CP_FLAG_CLEAR 0
+#define CP_FLAG_SET 1
+#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD 0
+#define CP_FLAG_SWAP_DIRECTION_SAVE 1
+#define CP_FLAG_UNK01 ((0 * 32) + 1)
+#define CP_FLAG_UNK01_CLEAR 0
+#define CP_FLAG_UNK01_SET 1
+#define CP_FLAG_UNK03 ((0 * 32) + 3)
+#define CP_FLAG_UNK03_CLEAR 0
+#define CP_FLAG_UNK03_SET 1
+#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING 1
+#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING 1
+#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
+#define CP_FLAG_UNK0B_CLEAR 0
+#define CP_FLAG_UNK0B_SET 1
+#define CP_FLAG_XFER_SWITCH ((0 * 32) + 0xe)
+#define CP_FLAG_XFER_SWITCH_DISABLE 0
+#define CP_FLAG_XFER_SWITCH_ENABLE 1
+#define CP_FLAG_STATE ((0 * 32) + 0x1c)
+#define CP_FLAG_STATE_STOPPED 0
+#define CP_FLAG_STATE_RUNNING 1
+#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
+#define CP_FLAG_UNK1D_CLEAR 0
+#define CP_FLAG_UNK1D_SET 1
+#define CP_FLAG_UNK20 ((1 * 32) + 0)
+#define CP_FLAG_UNK20_CLEAR 0
+#define CP_FLAG_UNK20_SET 1
+#define CP_FLAG_STATUS ((2 * 32) + 0)
+#define CP_FLAG_STATUS_BUSY 0
+#define CP_FLAG_STATUS_IDLE 1
+#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING 1
+#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING 1
+#define CP_FLAG_NEWCTX ((2 * 32) + 10)
+#define CP_FLAG_NEWCTX_BUSY 0
+#define CP_FLAG_NEWCTX_DONE 1
+#define CP_FLAG_XFER ((2 * 32) + 11)
+#define CP_FLAG_XFER_IDLE 0
+#define CP_FLAG_XFER_BUSY 1
+#define CP_FLAG_ALWAYS ((2 * 32) + 13)
+#define CP_FLAG_ALWAYS_FALSE 0
+#define CP_FLAG_ALWAYS_TRUE 1
+#define CP_FLAG_INTR ((2 * 32) + 15)
+#define CP_FLAG_INTR_NOT_PENDING 0
+#define CP_FLAG_INTR_PENDING 1
+
+#define CP_CTX 0x00100000
+#define CP_CTX_COUNT 0x000f0000
+#define CP_CTX_COUNT_SHIFT 16
+#define CP_CTX_REG 0x00003fff
+#define CP_LOAD_SR 0x00200000
+#define CP_LOAD_SR_VALUE 0x000fffff
+#define CP_BRA 0x00400000
+#define CP_BRA_IP 0x0001ff00
+#define CP_BRA_IP_SHIFT 8
+#define CP_BRA_IF_CLEAR 0x00000080
+#define CP_BRA_FLAG 0x0000007f
+#define CP_WAIT 0x00500000
+#define CP_WAIT_SET 0x00000080
+#define CP_WAIT_FLAG 0x0000007f
+#define CP_SET 0x00700000
+#define CP_SET_1 0x00000080
+#define CP_SET_FLAG 0x0000007f
+#define CP_NEWCTX 0x00600004
+#define CP_NEXT_TO_SWAP 0x00600005
+#define CP_SET_CONTEXT_POINTER 0x00600006
+#define CP_SET_XFER_POINTER 0x00600007
+#define CP_ENABLE 0x00600009
+#define CP_END 0x0060000c
+#define CP_NEXT_TO_CURRENT 0x0060000d
+#define CP_DISABLE1 0x0090ffff
+#define CP_DISABLE2 0x0091ffff
+#define CP_XFER_1 0x008000ff
+#define CP_XFER_2 0x008800ff
+#define CP_SEEK_1 0x00c000ff
+#define CP_SEEK_2 0x00c800ff
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "ctx.h"
+
+#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
+#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
+
+/*
+ * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
+ * the GPU itself that does context-switching, but it needs a special
+ * microcode to do it. And it's the driver's task to supply this microcode,
+ * further known as ctxprog, as well as the initial context values, known
+ * as ctxvals.
+ *
+ * Without ctxprog, you cannot switch contexts. Not even in software, since
+ * the majority of context [xfer strands] isn't accessible directly. You're
+ * stuck with a single channel, and you also suffer all the problems resulting
+ * from missing ctxvals, since you cannot load them.
+ *
+ * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
+ * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
+ * since you don't have... some sort of needed setup.
+ *
+ * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
+ * it's too much hassle to handle no-ctxprog as a special case.
+ */
+
+/*
+ * How ctxprogs work.
+ *
+ * The ctxprog is written in its own kind of microcode, with a very small and
+ * crappy set of available commands. You upload it to a small [512 insns]
+ * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
+ * switch channels, or when the driver explicitly requests it. Stuff visible
+ * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
+ * the per-channel context save area in VRAM [known as ctxvals or grctx],
+ * 4 flags registers, a scratch register, two grctx pointers, plus many
+ * random poorly-understood details.
+ *
+ * When ctxprog runs, it's supposed to check what operations are asked of it,
+ * save old context if requested, optionally reset PGRAPH and switch to the
+ * new channel, and load the new context. Context consists of three major
+ * parts: subset of MMIO registers and two "xfer areas".
+ */
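+
+/* A loose sketch, not real driver code, of how the ctx.h helpers are chained
+ * to build such a ctxprog; each call below also appears verbatim in the code
+ * further down:
+ *
+ * cp_name(ctx, cp_check_load);                - define a branch target
+ * cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load); - branch on a flag
+ * cp_ctx (ctx, 0x400808, 7);                  - save/restore 7 MMIO registers
+ * gr_def (ctx, 0x400814, 0x00000030);         - default ctxvals value for one
+ * cp_out (ctx, CP_END);                       - emit a raw ctxprog opcode
+ */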
+
+/* TODO:
+ * - document unimplemented bits compared to nvidia
+ * - NVAx: make a TP subroutine, use it.
+ * - use 0x4008fc instead of 0x1540?
+ */
+
+enum cp_label {
+ cp_check_load = 1,
+ cp_setup_auto_load,
+ cp_setup_load,
+ cp_setup_save,
+ cp_swap_state,
+ cp_prepare_exit,
+ cp_exit,
+};
+
+static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
+
+/* Main function: construct the ctxprog skeleton, call the other functions. */
+
+static int
+nv50_grctx_generate(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ break;
+ default:
+ NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
+ "your NV%x card.\n", dev_priv->chipset);
+ NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
+ "the devs.\n");
+ return -ENOSYS;
+ }
+
+ cp_set (ctx, STATE, RUNNING);
+ cp_set (ctx, XFER_SWITCH, ENABLE);
+ /* decide whether we're loading/unloading the context */
+ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+ cp_name(ctx, cp_check_load);
+ cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+ cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+ cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit);
+
+ /* setup for context load */
+ cp_name(ctx, cp_setup_auto_load);
+ cp_out (ctx, CP_DISABLE1);
+ cp_out (ctx, CP_DISABLE2);
+ cp_out (ctx, CP_ENABLE);
+ cp_out (ctx, CP_NEXT_TO_SWAP);
+ cp_set (ctx, UNK01, SET);
+ cp_name(ctx, cp_setup_load);
+ cp_out (ctx, CP_NEWCTX);
+ cp_wait(ctx, NEWCTX, BUSY);
+ cp_set (ctx, UNK1D, CLEAR);
+ cp_set (ctx, SWAP_DIRECTION, LOAD);
+ cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
+ cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+ /* setup for context save */
+ cp_name(ctx, cp_setup_save);
+ cp_set (ctx, UNK1D, SET);
+ cp_wait(ctx, STATUS, BUSY);
+ cp_wait(ctx, INTR, PENDING);
+ cp_bra (ctx, STATUS, BUSY, cp_setup_save);
+ cp_set (ctx, UNK01, SET);
+ cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+ /* general PGRAPH state */
+ cp_name(ctx, cp_swap_state);
+ cp_set (ctx, UNK03, SET);
+ cp_pos (ctx, 0x00004/4);
+ cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
+ cp_pos (ctx, 0x00100/4);
+ nv50_graph_construct_mmio(ctx);
+ nv50_graph_construct_xfer1(ctx);
+ nv50_graph_construct_xfer2(ctx);
+
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+
+ cp_set (ctx, UNK20, SET);
+ cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
+ cp_lsr (ctx, ctx->ctxvals_base);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, 4);
+ cp_out (ctx, CP_SEEK_1);
+ cp_out (ctx, CP_XFER_1);
+ cp_wait(ctx, XFER, BUSY);
+
+ /* pre-exit state updates */
+ cp_name(ctx, cp_prepare_exit);
+ cp_set (ctx, UNK01, CLEAR);
+ cp_set (ctx, UNK03, CLEAR);
+ cp_set (ctx, UNK1D, CLEAR);
+
+ cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+ cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+ cp_name(ctx, cp_exit);
+ cp_set (ctx, USER_SAVE, NOT_PENDING);
+ cp_set (ctx, USER_LOAD, NOT_PENDING);
+ cp_set (ctx, XFER_SWITCH, DISABLE);
+ cp_set (ctx, STATE, STOPPED);
+ cp_out (ctx, CP_END);
+ ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
+
+ return 0;
+}
+
+void
+nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
+{
+ nv50_grctx_generate(&(struct nouveau_grctx) {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_VALS,
+ .data = mem,
+ });
+}
+
+int
+nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
+{
+ struct nouveau_grctx ctx = {
+ .dev = dev,
+ .mode = NOUVEAU_GRCTX_PROG,
+ .data = data,
+ .ctxprog_max = max
+ };
+ int ret;
+
+ ret = nv50_grctx_generate(&ctx);
+ *cnt = ctx.ctxvals_pos * 4;
+ *len = ctx.ctxprog_len;
+ return ret;
+}
+
+/*
+ * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
+ * registers to save/restore and the default values for them.
+ */
+
+static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
+
+static void
+nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i, j;
+ int offset, base;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+
+ /* 0800: DISPATCH */
+ cp_ctx(ctx, 0x400808, 7);
+ gr_def(ctx, 0x400814, 0x00000030);
+ cp_ctx(ctx, 0x400834, 0x32);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, 0x400834, 0xff400040);
+ gr_def(ctx, 0x400838, 0xfff00080);
+ gr_def(ctx, 0x40083c, 0xfff70090);
+ gr_def(ctx, 0x400840, 0xffe806a8);
+ }
+ gr_def(ctx, 0x400844, 0x00000002);
+ if (IS_NVA3F(dev_priv->chipset))
+ gr_def(ctx, 0x400894, 0x00001000);
+ gr_def(ctx, 0x4008e8, 0x00000003);
+ gr_def(ctx, 0x4008ec, 0x00001000);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x400908, 0xb);
+ else if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, 0x400908, 0xc);
+ else
+ cp_ctx(ctx, 0x400908, 0xe);
+
+ if (dev_priv->chipset >= 0xa0)
+ cp_ctx(ctx, 0x400b00, 0x1);
+ if (IS_NVA3F(dev_priv->chipset)) {
+ cp_ctx(ctx, 0x400b10, 0x1);
+ gr_def(ctx, 0x400b10, 0x0001629d);
+ cp_ctx(ctx, 0x400b20, 0x1);
+ gr_def(ctx, 0x400b20, 0x0001629d);
+ }
+
+ nv50_graph_construct_mmio_ddata(ctx);
+
+ /* 0C00: VFETCH */
+ cp_ctx(ctx, 0x400c08, 0x2);
+ gr_def(ctx, 0x400c08, 0x0000fe0c);
+
+ /* 1000 */
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x401008, 0x4);
+ gr_def(ctx, 0x401014, 0x00001000);
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
+ cp_ctx(ctx, 0x401008, 0x5);
+ gr_def(ctx, 0x401018, 0x00001000);
+ } else {
+ cp_ctx(ctx, 0x401008, 0x5);
+ gr_def(ctx, 0x401018, 0x00004000);
+ }
+
+ /* 1400 */
+ cp_ctx(ctx, 0x401400, 0x8);
+ cp_ctx(ctx, 0x401424, 0x3);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x40142c, 0x0001fd87);
+ else
+ gr_def(ctx, 0x40142c, 0x00000187);
+ cp_ctx(ctx, 0x401540, 0x5);
+ gr_def(ctx, 0x401550, 0x00001018);
+
+ /* 1800: STREAMOUT */
+ cp_ctx(ctx, 0x401814, 0x1);
+ gr_def(ctx, 0x401814, 0x000000ff);
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, 0x40181c, 0xe);
+ gr_def(ctx, 0x401850, 0x00000004);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x40181c, 0xf);
+ gr_def(ctx, 0x401854, 0x00000004);
+ } else {
+ cp_ctx(ctx, 0x40181c, 0x13);
+ gr_def(ctx, 0x401864, 0x00000004);
+ }
+
+ /* 1C00 */
+ cp_ctx(ctx, 0x401c00, 0x1);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, 0x401c00, 0x0001005f);
+ break;
+ case 0x84:
+ case 0x86:
+ case 0x94:
+ gr_def(ctx, 0x401c00, 0x044d00df);
+ break;
+ case 0x92:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, 0x401c00, 0x042500df);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ case 0xaf:
+ gr_def(ctx, 0x401c00, 0x142500df);
+ break;
+ }
+
+ /* 2000 */
+
+ /* 2400 */
+ cp_ctx(ctx, 0x402400, 0x1);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x402408, 0x1);
+ else
+ cp_ctx(ctx, 0x402408, 0x2);
+ gr_def(ctx, 0x402408, 0x00000600);
+
+ /* 2800: CSCHED */
+ cp_ctx(ctx, 0x402800, 0x1);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x402800, 0x00000006);
+
+ /* 2C00: ZCULL */
+ cp_ctx(ctx, 0x402c08, 0x6);
+ if (dev_priv->chipset != 0x50)
+ gr_def(ctx, 0x402c14, 0x01000000);
+ gr_def(ctx, 0x402c18, 0x000000ff);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x402ca0, 0x1);
+ else
+ cp_ctx(ctx, 0x402ca0, 0x2);
+ if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x402ca0, 0x00000400);
+ else if (!IS_NVA3F(dev_priv->chipset))
+ gr_def(ctx, 0x402ca0, 0x00000800);
+ else
+ gr_def(ctx, 0x402ca0, 0x00000400);
+ cp_ctx(ctx, 0x402cac, 0x4);
+
+ /* 3000: ENG2D */
+ cp_ctx(ctx, 0x403004, 0x1);
+ gr_def(ctx, 0x403004, 0x00000001);
+
+ /* 3400 */
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, 0x403404, 0x1);
+ gr_def(ctx, 0x403404, 0x00000001);
+ }
+
+ /* 5000: CCACHE */
+ cp_ctx(ctx, 0x405000, 0x1);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, 0x405000, 0x00300080);
+ break;
+ case 0x84:
+ case 0xa0:
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ gr_def(ctx, 0x405000, 0x000e0080);
+ break;
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ gr_def(ctx, 0x405000, 0x00000080);
+ break;
+ }
+ cp_ctx(ctx, 0x405014, 0x1);
+ gr_def(ctx, 0x405014, 0x00000004);
+ cp_ctx(ctx, 0x40501c, 0x1);
+ cp_ctx(ctx, 0x405024, 0x1);
+ cp_ctx(ctx, 0x40502c, 0x1);
+
+ /* 6000? */
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x4063e0, 0x1);
+
+ /* 6800: M2MF */
+ if (dev_priv->chipset < 0x90) {
+ cp_ctx(ctx, 0x406814, 0x2b);
+ gr_def(ctx, 0x406818, 0x00000f80);
+ gr_def(ctx, 0x406860, 0x007f0080);
+ gr_def(ctx, 0x40689c, 0x007f0080);
+ } else {
+ cp_ctx(ctx, 0x406814, 0x4);
+ if (dev_priv->chipset == 0x98)
+ gr_def(ctx, 0x406818, 0x00000f80);
+ else
+ gr_def(ctx, 0x406818, 0x00001f80);
+ if (IS_NVA3F(dev_priv->chipset))
+ gr_def(ctx, 0x40681c, 0x00000030);
+ cp_ctx(ctx, 0x406830, 0x3);
+ }
+
+ /* 7000: per-ROP group state */
+ for (i = 0; i < 8; i++) {
+ if (units & (1<<(i+16))) {
+ cp_ctx(ctx, 0x407000 + (i<<8), 3);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
+ else if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
+ else
+ gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
+ gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
+
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, 0x407010 + (i<<8), 1);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x407010 + (i<<8), 2);
+ gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+ gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
+ } else {
+ cp_ctx(ctx, 0x407010 + (i<<8), 3);
+ gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+ if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
+ else
+ gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
+ }
+
+ cp_ctx(ctx, 0x407080 + (i<<8), 4);
+ if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
+ else
+ gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
+ else
+ gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
+ gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, 0x407094 + (i<<8), 1);
+ else if (!IS_NVA3F(dev_priv->chipset))
+ cp_ctx(ctx, 0x407094 + (i<<8), 3);
+ else {
+ cp_ctx(ctx, 0x407094 + (i<<8), 4);
+ gr_def(ctx, 0x4070a0 + (i<<8), 1);
+ }
+ }
+ }
+
+ cp_ctx(ctx, 0x407c00, 0x3);
+ if (dev_priv->chipset < 0x90)
+ gr_def(ctx, 0x407c00, 0x00010040);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x407c00, 0x00390040);
+ else
+ gr_def(ctx, 0x407c00, 0x003d0040);
+ gr_def(ctx, 0x407c08, 0x00000022);
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, 0x407c10, 0x3);
+ cp_ctx(ctx, 0x407c20, 0x1);
+ cp_ctx(ctx, 0x407c2c, 0x1);
+ }
+
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x407d00, 0x9);
+ } else {
+ cp_ctx(ctx, 0x407d00, 0x15);
+ }
+ if (dev_priv->chipset == 0x98)
+ gr_def(ctx, 0x407d08, 0x00380040);
+ else {
+ if (dev_priv->chipset < 0x90)
+ gr_def(ctx, 0x407d08, 0x00010040);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x407d08, 0x00390040);
+ else
+ gr_def(ctx, 0x407d08, 0x003d0040);
+ gr_def(ctx, 0x407d0c, 0x00000022);
+ }
+
+ /* 8000+: per-TP state */
+ for (i = 0; i < 10; i++) {
+ if (units & (1<<i)) {
+ if (dev_priv->chipset < 0xa0)
+ base = 0x408000 + (i<<12);
+ else
+ base = 0x408000 + (i<<11);
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xc00;
+ else
+ offset = base + 0x80;
+ cp_ctx(ctx, offset + 0x00, 1);
+ gr_def(ctx, offset + 0x00, 0x0000ff0a);
+ cp_ctx(ctx, offset + 0x08, 1);
+
+ /* per-MP state */
+ for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
+ if (!(units & (1 << (j+24)))) continue;
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0x200 + (j<<7);
+ else
+ offset = base + 0x100 + (j<<7);
+ cp_ctx(ctx, offset, 0x20);
+ gr_def(ctx, offset + 0x00, 0x01800000);
+ gr_def(ctx, offset + 0x04, 0x00160000);
+ gr_def(ctx, offset + 0x08, 0x01800000);
+ gr_def(ctx, offset + 0x18, 0x0003ffff);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, offset + 0x1c, 0x00080000);
+ break;
+ case 0x84:
+ gr_def(ctx, offset + 0x1c, 0x00880000);
+ break;
+ case 0x86:
+ gr_def(ctx, offset + 0x1c, 0x018c0000);
+ break;
+ case 0x92:
+ case 0x96:
+ case 0x98:
+ gr_def(ctx, offset + 0x1c, 0x118c0000);
+ break;
+ case 0x94:
+ gr_def(ctx, offset + 0x1c, 0x10880000);
+ break;
+ case 0xa0:
+ case 0xa5:
+ gr_def(ctx, offset + 0x1c, 0x310c0000);
+ break;
+ case 0xa3:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ gr_def(ctx, offset + 0x1c, 0x300c0000);
+ break;
+ }
+ gr_def(ctx, offset + 0x40, 0x00010401);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x48, 0x00000040);
+ else
+ gr_def(ctx, offset + 0x48, 0x00000078);
+ gr_def(ctx, offset + 0x50, 0x000000bf);
+ gr_def(ctx, offset + 0x58, 0x00001210);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x5c, 0x00000080);
+ else
+ gr_def(ctx, offset + 0x5c, 0x08000080);
+ if (dev_priv->chipset >= 0xa0)
+ gr_def(ctx, offset + 0x68, 0x0000003e);
+ }
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x300, 0x4);
+ else
+ cp_ctx(ctx, base + 0x300, 0x5);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, base + 0x304, 0x00007070);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, base + 0x304, 0x00027070);
+ else if (!IS_NVA3F(dev_priv->chipset))
+ gr_def(ctx, base + 0x304, 0x01127070);
+ else
+ gr_def(ctx, base + 0x304, 0x05127070);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x318, 1);
+ else
+ cp_ctx(ctx, base + 0x320, 1);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, base + 0x318, 0x0003ffff);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, base + 0x318, 0x03ffffff);
+ else
+ gr_def(ctx, base + 0x320, 0x07ffffff);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x324, 5);
+ else
+ cp_ctx(ctx, base + 0x328, 4);
+
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, base + 0x340, 9);
+ offset = base + 0x340;
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
+ cp_ctx(ctx, base + 0x33c, 0xb);
+ offset = base + 0x344;
+ } else {
+ cp_ctx(ctx, base + 0x33c, 0xd);
+ offset = base + 0x344;
+ }
+ gr_def(ctx, offset + 0x0, 0x00120407);
+ gr_def(ctx, offset + 0x4, 0x05091507);
+ if (dev_priv->chipset == 0x84)
+ gr_def(ctx, offset + 0x8, 0x05100202);
+ else
+ gr_def(ctx, offset + 0x8, 0x05010202);
+ gr_def(ctx, offset + 0xc, 0x00030201);
+ if (dev_priv->chipset == 0xa3)
+ cp_ctx(ctx, base + 0x36c, 1);
+
+ cp_ctx(ctx, base + 0x400, 2);
+ gr_def(ctx, base + 0x404, 0x00000040);
+ cp_ctx(ctx, base + 0x40c, 2);
+ gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
+ gr_def(ctx, base + 0x410, 0x00141210);
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0x800;
+ else
+ offset = base + 0x500;
+ cp_ctx(ctx, offset, 6);
+ gr_def(ctx, offset + 0x0, 0x000001f0);
+ gr_def(ctx, offset + 0x4, 0x00000001);
+ gr_def(ctx, offset + 0x8, 0x00000003);
+ if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
+ gr_def(ctx, offset + 0xc, 0x00008000);
+ gr_def(ctx, offset + 0x14, 0x00039e00);
+ cp_ctx(ctx, offset + 0x1c, 2);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x1c, 0x00000040);
+ else
+ gr_def(ctx, offset + 0x1c, 0x00000100);
+ gr_def(ctx, offset + 0x20, 0x00003800);
+
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, base + 0x54c, 2);
+ if (!IS_NVA3F(dev_priv->chipset))
+ gr_def(ctx, base + 0x54c, 0x003fe006);
+ else
+ gr_def(ctx, base + 0x54c, 0x003fe007);
+ gr_def(ctx, base + 0x550, 0x003fe000);
+ }
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xa00;
+ else
+ offset = base + 0x680;
+ cp_ctx(ctx, offset, 1);
+ gr_def(ctx, offset, 0x00404040);
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xe00;
+ else
+ offset = base + 0x700;
+ cp_ctx(ctx, offset, 2);
+ if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, offset, 0x0077f005);
+ else if (dev_priv->chipset == 0xa5)
+ gr_def(ctx, offset, 0x6cf7f007);
+ else if (dev_priv->chipset == 0xa8)
+ gr_def(ctx, offset, 0x6cfff007);
+ else if (dev_priv->chipset == 0xac)
+ gr_def(ctx, offset, 0x0cfff007);
+ else
+ gr_def(ctx, offset, 0x0cf7f007);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x4, 0x00007fff);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, offset + 0x4, 0x003f7fff);
+ else
+ gr_def(ctx, offset + 0x4, 0x02bf7fff);
+ cp_ctx(ctx, offset + 0x2c, 1);
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, offset + 0x50, 9);
+ gr_def(ctx, offset + 0x54, 0x000003ff);
+ gr_def(ctx, offset + 0x58, 0x00000003);
+ gr_def(ctx, offset + 0x5c, 0x00000003);
+ gr_def(ctx, offset + 0x60, 0x000001ff);
+ gr_def(ctx, offset + 0x64, 0x0000001f);
+ gr_def(ctx, offset + 0x68, 0x0000000f);
+ gr_def(ctx, offset + 0x6c, 0x0000000f);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, offset + 0x50, 1);
+ cp_ctx(ctx, offset + 0x70, 1);
+ } else {
+ cp_ctx(ctx, offset + 0x50, 1);
+ cp_ctx(ctx, offset + 0x60, 5);
+ }
+ }
+ }
+}
+
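+/* emit <num> dwords of default ctxvals data with value <val>, advancing the
+ * current position (values are only written in VALS mode) */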
+static void
+dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
+ int i;
+ if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+ for (i = 0; i < num; i++)
+ nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
+ ctx->ctxvals_pos += num;
+}
+
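+/* Default values for the context data block backing the PGRAPH registers
+ * at 0x404800 (NVA3F chipsets) or 0x405400 (all others).  The dd_emit
+ * calls below both count the words and, in VALS mode, fill in the
+ * non-zero defaults; the function then rewinds to 'base' and lets cp_ctx
+ * cover the same range of ctxvals in one go. */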
+static void
+nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int base, num;
+ base = ctx->ctxvals_pos;
+
+ /* tesla state */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK0F90 */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK135C */
+
+ /* SRC_TIC state */
+ dd_emit(ctx, 1, 0); /* 00000007 SRC_TILE_MODE_Z */
+ dd_emit(ctx, 1, 2); /* 00000007 SRC_TILE_MODE_Y */
+ dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */
+ dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */
+ dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */
+ if (dev_priv->chipset >= 0x94)
+ dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */
+ dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */
+ dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */
+
+ /* turing state */
+ dd_emit(ctx, 1, 0); /* 0000000f TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f SAMPLERS_LOG2 */
+ dd_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
+ dd_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
+ dd_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
+ dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
+ dd_emit(ctx, 1, 1); /* 0000ffff BLOCK_ALLOC_THREADS */
+ dd_emit(ctx, 1, 1); /* 00000001 LANES32 */
+ dd_emit(ctx, 1, 0); /* 000000ff UNK370 */
+ dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_UNK */
+ dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_COUNT */
+ dd_emit(ctx, 1, 1); /* 000000ff UNK384 bits 8-15 */
+ dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+ dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+ dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_X */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_XMY */
+ dd_emit(ctx, 1, 0); /* 00000001 BLOCKDIM_XMY_OVERFLOW */
+ dd_emit(ctx, 1, 1); /* 0003ffff BLOCKDIM_XMYMZ */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_Y */
+ dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */
+ dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */
+ dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */
+ if (IS_NVA3F(dev_priv->chipset))
+ dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */
+ dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */
+ dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */
+ dd_emit(ctx, 1, 7); /* 00000007 LOCAL_WARPS_LOG_ALLOC */
+ dd_emit(ctx, 1, 1); /* 00000007 STACK_WARPS_NO_CLAMP */
+ dd_emit(ctx, 1, 7); /* 00000007 STACK_WARPS_LOG_ALLOC */
+ dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */
+ dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */
+ dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */
+
+ /* compat 2d state */
+ if (dev_priv->chipset == 0x50) {
+ dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */
+
+ dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */
+
+ dd_emit(ctx, 1, 1); /* ffffffff pattern COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff pattern SHAPE */
+ dd_emit(ctx, 1, 1); /* ffffffff pattern PATTERN_SELECT */
+
+ dd_emit(ctx, 1, 0xa); /* ffffffff surf2d SRC_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff surf2d DMA_SRC */
+ dd_emit(ctx, 1, 0); /* 000000ff surf2d SRC_ADDRESS_HIGH */
+ dd_emit(ctx, 1, 0); /* ffffffff surf2d SRC_ADDRESS_LOW */
+ dd_emit(ctx, 1, 0x40); /* 0000ffff surf2d SRC_PITCH */
+ dd_emit(ctx, 1, 0); /* 0000000f surf2d SRC_TILE_MODE_Z */
+ dd_emit(ctx, 1, 2); /* 0000000f surf2d SRC_TILE_MODE_Y */
+ dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_HEIGHT */
+ dd_emit(ctx, 1, 1); /* 00000001 surf2d SRC_LINEAR */
+ dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_WIDTH */
+
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_Y */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_Y */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_Y */
+ dd_emit(ctx, 1, 1); /* ffffffff gdirect COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff gdirect OPERATION */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_X */
+ dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_Y */
+
+ dd_emit(ctx, 1, 0); /* 0000ffff blit SRC_Y */
+ dd_emit(ctx, 1, 0); /* ffffffff blit OPERATION */
+
+ dd_emit(ctx, 1, 0); /* ffffffff ifc OPERATION */
+
+ dd_emit(ctx, 1, 0); /* ffffffff iifc INDEX_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff iifc LUT_OFFSET */
+ dd_emit(ctx, 1, 4); /* ffffffff iifc COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff iifc OPERATION */
+ }
+
+ /* m2mf state */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_COUNT */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_LENGTH_IN */
+ dd_emit(ctx, 2, 0); /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */
+ dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_OUT */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_OUT */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_OUT_Z */
+ dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_OUT */
+ dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_OUT */
+ dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_IN */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_IN */
+ dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_IN_Z */
+ dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_IN */
+ dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_IN_X, Y */
+ dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */
+
+ /* more compat 2d state */
+ if (dev_priv->chipset == 0x50) {
+ dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */
+
+ dd_emit(ctx, 1, 1); /* ffffffff triangle COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff triangle OPERATION */
+
+ dd_emit(ctx, 1, 0); /* 0000000f sifm TILE_MODE_Z */
+ dd_emit(ctx, 1, 2); /* 0000000f sifm TILE_MODE_Y */
+ dd_emit(ctx, 1, 0); /* 000000ff sifm FORMAT_FILTER */
+ dd_emit(ctx, 1, 1); /* 000000ff sifm FORMAT_ORIGIN */
+ dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_PITCH */
+ dd_emit(ctx, 1, 1); /* 00000001 sifm SRC_LINEAR */
+ dd_emit(ctx, 1, 0); /* 000000ff sifm SRC_OFFSET_HIGH */
+ dd_emit(ctx, 1, 0); /* ffffffff sifm SRC_OFFSET */
+ dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_HEIGHT */
+ dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_WIDTH */
+ dd_emit(ctx, 1, 3); /* ffffffff sifm COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff sifm OPERATION */
+
+ dd_emit(ctx, 1, 0); /* ffffffff sifc OPERATION */
+ }
+
+ /* tesla state */
+ dd_emit(ctx, 1, 0); /* 0000000f GP_TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f GP_SAMPLERS_LOG2 */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* ffffffff */
+ dd_emit(ctx, 1, 4); /* 000000ff UNK12B0_0 */
+ dd_emit(ctx, 1, 0x70); /* 000000ff UNK12B0_1 */
+ dd_emit(ctx, 1, 0x80); /* 000000ff UNK12B0_3 */
+ dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */
+ dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ dd_emit(ctx, 1, 0); /* ffffffff */
+ dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
+ } else {
+ dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
+ }
+ dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
+ dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */
+ dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */
+ if (dev_priv->chipset == 0x50) {
+ dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ } else {
+ dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_PTSZ.ENABLE */
+ dd_emit(ctx, 1, 0x29); /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */
+ dd_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM */
+ dd_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ dd_emit(ctx, 1, 8); /* 0000000f SEMANTIC_CLIP.CLIP_HIGH */
+ dd_emit(ctx, 1, 4); /* 000000ff SEMANTIC_CLIP.CLIP_LO */
+ dd_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK1900 */
+ }
+ dd_emit(ctx, 1, 0); /* 00000007 RT_CONTROL_MAP0 */
+ dd_emit(ctx, 1, 1); /* 00000007 RT_CONTROL_MAP1 */
+ dd_emit(ctx, 1, 2); /* 00000007 RT_CONTROL_MAP2 */
+ dd_emit(ctx, 1, 3); /* 00000007 RT_CONTROL_MAP3 */
+ dd_emit(ctx, 1, 4); /* 00000007 RT_CONTROL_MAP4 */
+ dd_emit(ctx, 1, 5); /* 00000007 RT_CONTROL_MAP5 */
+ dd_emit(ctx, 1, 6); /* 00000007 RT_CONTROL_MAP6 */
+ dd_emit(ctx, 1, 7); /* 00000007 RT_CONTROL_MAP7 */
+ dd_emit(ctx, 1, 1); /* 0000000f RT_CONTROL_COUNT */
+ dd_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_UNK */
+ dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */
+ dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 3, 0); /* 1, 1, 1 */
+ else
+ dd_emit(ctx, 2, 0); /* 1, 1 */
+ dd_emit(ctx, 1, 0); /* ffffffff GP_ENABLE */
+ dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ dd_emit(ctx, 1, 3); /* 00000003 */
+ dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */
+ }
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */
+ dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
+ dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
+ dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
+ dd_emit(ctx, 1, 0xc); /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */
+ dd_emit(ctx, 1, 1); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */
+ dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */
+ dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
+ dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
+ dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+ if (dev_priv->chipset >= 0xa0)
+ dd_emit(ctx, 1, 0); /* ffffffff */
+ dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
+ dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */
+ dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+ dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+ dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ if (dev_priv->chipset != 0x50)
+ dd_emit(ctx, 8, 0); /* 00000001 */
+ if (dev_priv->chipset >= 0xa0) {
+ dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */
+ dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */
+ dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */
+ dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */
+ }
+ dd_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ dd_emit(ctx, 1, 0x14); /* 0000001f ZETA_FORMAT */
+ dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */
+ dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset))
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */
+ if (dev_priv->chipset >= 0xa0)
+ dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
+ dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
+ if (dev_priv->chipset >= 0xa0)
+ dd_emit(ctx, 1, 0); /* 00000003 */
+ dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */
+ dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */
+ dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */
+ dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */
+ dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */
+ if (dev_priv->chipset != 0x50) {
+ dd_emit(ctx, 1, 0xe00); /* 7fff */
+ dd_emit(ctx, 1, 0x1000); /* 7fff */
+ dd_emit(ctx, 1, 0x1e00); /* 7fff */
+ }
+ dd_emit(ctx, 1, 0); /* 00000001 BEGIN_END_ACTIVE */
+ dd_emit(ctx, 1, 1); /* 00000001 POLYGON_MODE_??? */
+ dd_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */
+ dd_emit(ctx, 1, 1); /* 000000ff FP_REG_ALLOC_TEMP... without /4? */
+ dd_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
+ dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
+ dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
+ if (IS_NVA3F(dev_priv->chipset))
+ dd_emit(ctx, 1, 0x200);
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ if (dev_priv->chipset < 0xa0) {
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0x70); /* 000000ff */
+ dd_emit(ctx, 1, 0x80); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0x70); /* 000000ff */
+ dd_emit(ctx, 1, 0x80); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ } else {
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0xf0); /* 000000ff */
+ dd_emit(ctx, 1, 0xff); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 00000001 */
+ dd_emit(ctx, 1, 1); /* 00000001 */
+ dd_emit(ctx, 1, 0xf0); /* 000000ff */
+ dd_emit(ctx, 1, 0xff); /* 000000ff */
+ dd_emit(ctx, 1, 0); /* 000000ff */
+ dd_emit(ctx, 1, 9); /* 0000003f UNK114C.COMP,SIZE */
+ }
+
+ /* eng2d state */
+ dd_emit(ctx, 1, 0); /* 00000001 eng2d COLOR_KEY_ENABLE */
+ dd_emit(ctx, 1, 0); /* 00000007 eng2d COLOR_KEY_FORMAT */
+ dd_emit(ctx, 1, 1); /* ffffffff eng2d DST_DEPTH */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DST_FORMAT */
+ dd_emit(ctx, 1, 0); /* ffffffff eng2d DST_LAYER */
+ dd_emit(ctx, 1, 1); /* 00000001 eng2d DST_LINEAR */
+ dd_emit(ctx, 1, 0); /* 00000007 eng2d PATTERN_COLOR_FORMAT */
+ dd_emit(ctx, 1, 0); /* 00000007 eng2d OPERATION */
+ dd_emit(ctx, 1, 0); /* 00000003 eng2d PATTERN_SELECT */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SIFC_FORMAT */
+ dd_emit(ctx, 1, 0); /* 00000001 eng2d SIFC_BITMAP_ENABLE */
+ dd_emit(ctx, 1, 2); /* 00000003 eng2d SIFC_BITMAP_UNK808 */
+ dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DU_DX_FRACT */
+ dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DU_DX_INT */
+ dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DV_DY_FRACT */
+ dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DV_DY_INT */
+ dd_emit(ctx, 1, 0); /* 00000001 eng2d BLIT_CONTROL_FILTER */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DRAW_COLOR_FORMAT */
+ dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SRC_FORMAT */
+ dd_emit(ctx, 1, 1); /* 00000001 eng2d SRC_LINEAR #2 */
+
+ num = ctx->ctxvals_pos - base;
+ ctx->ctxvals_pos = base;
+ if (IS_NVA3F(dev_priv->chipset))
+ cp_ctx(ctx, 0x404800, num);
+ else
+ cp_ctx(ctx, 0x405400, num);
+}
+
+/*
+ * xfer areas. These are a pain.
+ *
+ * There are 2 xfer areas: the first one is big and contains all sorts of
+ * stuff; the second is small and contains some per-TP context.
+ *
+ * Each area is split into 8 "strands". The areas, when saved to grctx,
+ * are made of 8-word blocks. Each block contains a single word from
+ * each strand. The strands are independent of each other; their
+ * addresses are unrelated, and the data in them is closely packed
+ * together. The strand layout varies a bit between cards: here and
+ * there, a single word is thrown out in the middle and the whole
+ * strand is offset by a bit from the corresponding one on another
+ * chipset. For this reason, addresses of stuff in strands are almost
+ * useless. Knowing the sequence of stuff and the size of the gaps
+ * between them is much more useful, and that's how we build the
+ * strands in our generator.
+ *
+ * NVA0 takes this mess to a whole new level by cutting the old strands
+ * into a few dozen pieces [known as genes], rearranging them randomly,
+ * and putting them back together to make new strands. Hopefully these
+ * genes correspond more or less directly to the same PGRAPH subunits
+ * as in the 400040 register.
+ *
+ * The most common value in the default context is 0, and when the genes
+ * are separated by 0's, the gene boundaries are quite speculative...
+ * some of them can be clearly deduced, others can be guessed, and yet
+ * others won't be resolved without figuring out the real meaning of
+ * a given ctxval. For the same reason, the ending point of each strand
+ * is unknown, except for strand 0, which is the longest strand and
+ * whose end corresponds to the end of the whole xfer.
+ *
+ * An unsolved mystery is the seek instruction: it takes an argument
+ * in bits 8-18, and that argument is clearly the place in the strands
+ * to seek to... but the offsets don't seem to correspond to the
+ * offsets seen in grctx. Perhaps there's another, real addressing
+ * scheme in the strands, one that doesn't change randomly, and the
+ * xfer insn just happens to skip over the unused bits? NV10-NV30
+ * PIPE comes to mind...
+ *
+ * As far as I know, there's no way to access the xfer areas directly
+ * without the help of ctxprog.
+ */
+
+static void
+xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val)
+{
+ int i;
+ if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+ for (i = 0; i < num; i++)
+ nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
+ ctx->ctxvals_pos += num << 3;
+}
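+
+/* A quick illustration of the interleave (hypothetical values, nothing
+ * below emits them): with ctx->ctxvals_pos = offset + 2 (strand 2),
+ * xf_emit(ctx, 3, 0xdeadbeef) stores 0xdeadbeef at grctx words
+ * offset + 2, offset + 10 and offset + 18 when run in VALS mode, one
+ * word per 8-word block, always in the strand 2 slot; in either mode
+ * it advances ctxvals_pos by 3 blocks (24 words). */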
+
+/* Gene declarations... */
+
+static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
+
+static void
+nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ int offset;
+ int size = 0;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+
+ offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ ctx->ctxvals_base = offset;
+
+ if (dev_priv->chipset < 0xa0) {
+ /* Strand 0 */
+ ctx->ctxvals_pos = offset;
+ nv50_graph_construct_gene_dispatch(ctx);
+ nv50_graph_construct_gene_m2mf(ctx);
+ nv50_graph_construct_gene_unk24xx(ctx);
+ nv50_graph_construct_gene_clipid(ctx);
+ nv50_graph_construct_gene_zcull(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1 */
+ ctx->ctxvals_pos = offset + 0x1;
+ nv50_graph_construct_gene_vfetch(ctx);
+ nv50_graph_construct_gene_eng2d(ctx);
+ nv50_graph_construct_gene_csched(ctx);
+ nv50_graph_construct_gene_ropm1(ctx);
+ nv50_graph_construct_gene_ropm2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2 */
+ ctx->ctxvals_pos = offset + 0x2;
+ nv50_graph_construct_gene_ccache(ctx);
+ nv50_graph_construct_gene_unk1cxx(ctx);
+ nv50_graph_construct_gene_strmout(ctx);
+ nv50_graph_construct_gene_unk14xx(ctx);
+ nv50_graph_construct_gene_unk10xx(ctx);
+ nv50_graph_construct_gene_unk34xx(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3: per-ROP group state */
+ ctx->ctxvals_pos = offset + 3;
+ for (i = 0; i < 6; i++)
+ if (units & (1 << (i + 16)))
+ nv50_graph_construct_gene_ropc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strands 4-7: per-TP state */
+ for (i = 0; i < 4; i++) {
+ ctx->ctxvals_pos = offset + 4 + i;
+ if (units & (1 << (2 * i)))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << (2 * i + 1)))
+ nv50_graph_construct_xfer_tp(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ } else {
+ /* Strand 0 */
+ ctx->ctxvals_pos = offset;
+ nv50_graph_construct_gene_dispatch(ctx);
+ nv50_graph_construct_gene_m2mf(ctx);
+ nv50_graph_construct_gene_unk34xx(ctx);
+ nv50_graph_construct_gene_csched(ctx);
+ nv50_graph_construct_gene_unk1cxx(ctx);
+ nv50_graph_construct_gene_strmout(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1 */
+ ctx->ctxvals_pos = offset + 1;
+ nv50_graph_construct_gene_unk10xx(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2 */
+ ctx->ctxvals_pos = offset + 2;
+ if (dev_priv->chipset == 0xa0)
+ nv50_graph_construct_gene_unk14xx(ctx);
+ nv50_graph_construct_gene_unk24xx(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3 */
+ ctx->ctxvals_pos = offset + 3;
+ nv50_graph_construct_gene_vfetch(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 4 */
+ ctx->ctxvals_pos = offset + 4;
+ nv50_graph_construct_gene_ccache(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 5 */
+ ctx->ctxvals_pos = offset + 5;
+ nv50_graph_construct_gene_ropm2(ctx);
+ nv50_graph_construct_gene_ropm1(ctx);
+ /* per-ROP context */
+ for (i = 0; i < 8; i++)
+ if (units & (1<<(i+16)))
+ nv50_graph_construct_gene_ropc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 6 */
+ ctx->ctxvals_pos = offset + 6;
+ nv50_graph_construct_gene_zcull(ctx);
+ nv50_graph_construct_gene_clipid(ctx);
+ nv50_graph_construct_gene_eng2d(ctx);
+ if (units & (1 << 0))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 1))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 2))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 3))
+ nv50_graph_construct_xfer_tp(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 7 */
+ ctx->ctxvals_pos = offset + 7;
+ if (dev_priv->chipset == 0xa0) {
+ if (units & (1 << 4))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 5))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 6))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 7))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 8))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 9))
+ nv50_graph_construct_xfer_tp(ctx);
+ } else {
+ nv50_graph_construct_gene_unk14xx(ctx);
+ }
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+
+ ctx->ctxvals_pos = offset + size * 8;
+ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ cp_lsr (ctx, offset);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, size);
+ cp_out (ctx, CP_SEEK_1);
+ cp_out (ctx, CP_XFER_1);
+ cp_wait(ctx, XFER, BUSY);
+}
+
+/*
+ * non-trivial demagiced parts of ctx init go here
+ */
+
+static void
+nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
+{
+ /* start of strand 0 */
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* SEEK */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 5, 0);
+ else if (!IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 6, 0);
+ else
+ xf_emit(ctx, 4, 0);
+ /* SEEK */
+ /* the PGRAPH's internal FIFO */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 8*3, 0);
+ else
+ xf_emit(ctx, 0x100*3, 0);
+ /* and another bonus slot?!? */
+ xf_emit(ctx, 3, 0);
+ /* and YET ANOTHER bonus slot? */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 3, 0);
+ /* SEEK */
+ /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ xf_emit(ctx, 9, 0);
+ /* SEEK */
+ if (dev_priv->chipset < 0x90)
+ xf_emit(ctx, 4, 0);
+ /* SEEK */
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 6*2, 0);
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 6*2, 0);
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x1c, 0);
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x1e, 0);
+ else
+ xf_emit(ctx, 0x22, 0);
+ /* SEEK */
+ xf_emit(ctx, 0x15, 0);
+}
+
+static void
+nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
+{
+ /* Strand 0, right after dispatch */
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int smallm2mf = 0;
+ if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
+ smallm2mf = 1;
+ /* SEEK */
+ xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
+ xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
+ xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
+ xf_emit (ctx, 1, 0); /* OFFSET_IN */
+ xf_emit (ctx, 1, 0); /* OFFSET_OUT */
+ xf_emit (ctx, 1, 0); /* PITCH_IN */
+ xf_emit (ctx, 1, 0); /* PITCH_OUT */
+ xf_emit (ctx, 1, 0); /* LINE_LENGTH */
+ xf_emit (ctx, 1, 0); /* LINE_COUNT */
+ xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
+ xf_emit (ctx, 1, 1); /* LINEAR_IN */
+ xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
+ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */
+ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */
+ xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */
+ xf_emit (ctx, 1, 1); /* LINEAR_OUT */
+ xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
+ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */
+ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */
+ xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
+ xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
+ xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
+ /* SEEK */
+ if (smallm2mf)
+ xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */
+ else
+ xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */
+ xf_emit(ctx, 4, 0); /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */
+ /* SEEK */
+ if (smallm2mf)
+ xf_emit(ctx, 0x400, 0); /* ffffffff */
+ else
+ xf_emit(ctx, 0x800, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */
+ /* SEEK */
+ xf_emit(ctx, 0x40, 0); /* 20 * bits ffffffff, 3ffff */
+ xf_emit(ctx, 0x6, 0); /* 1f, 0, 1f, 0, 1f, 0 */
+}
+
+static void
+nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 2, 0); /* RO */
+ xf_emit(ctx, 0x800, 0); /* ffffffff */
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ case 0xa0:
+ xf_emit(ctx, 0x2b, 0);
+ break;
+ case 0x84:
+ xf_emit(ctx, 0x29, 0);
+ break;
+ case 0x94:
+ case 0x96:
+ case 0xa3:
+ xf_emit(ctx, 0x27, 0);
+ break;
+ case 0x86:
+ case 0x98:
+ case 0xa5:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ xf_emit(ctx, 0x25, 0);
+ break;
+ }
+ /* CB bindings, 0x80 of them. first word is address >> 8, second is
+ * size >> 4 | valid << 24 */
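+ /* e.g. (hypothetical) a valid buffer at address 0x12345600 with size
+ * 0x10000 would be recorded as 0x00123456, 0x01001000 */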
+ xf_emit(ctx, 0x100, 0); /* ffffffff CB_DEF */
+ xf_emit(ctx, 1, 0); /* 0000007f CB_ADDR_BUFFER */
+ xf_emit(ctx, 1, 0); /* 0 */
+ xf_emit(ctx, 0x30, 0); /* ff SET_PROGRAM_CB */
+ xf_emit(ctx, 1, 0); /* 3f last SET_PROGRAM_CB */
+ xf_emit(ctx, 4, 0); /* RO */
+ xf_emit(ctx, 0x100, 0); /* ffffffff */
+ xf_emit(ctx, 8, 0); /* 1f, 0, 0, ... */
+ xf_emit(ctx, 8, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 3 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_CODE_CB */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TIC */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TSC */
+ xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
+ xf_emit(ctx, 1, 0); /* 000000ff TIC_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff TIC_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
+ xf_emit(ctx, 1, 0); /* 000000ff TSC_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff TSC_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
+ xf_emit(ctx, 1, 0); /* 000000ff VP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff VP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00ffffff VP_START_ID */
+ xf_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff GP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff GP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00ffffff GP_START_ID */
+ xf_emit(ctx, 1, 0); /* 000000ff FP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 00ffffff FP_START_ID */
+}
+
+static void
+nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff); /* 000007ff */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ for (i = 0; i < 8; i++) {
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x86:
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ xf_emit(ctx, 0xa0, 0); /* ffffffff */
+ break;
+ case 0x84:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0x120, 0);
+ break;
+ case 0xa5:
+ case 0xa8:
+ xf_emit(ctx, 0x100, 0); /* ffffffff */
+ break;
+ case 0xa0:
+ case 0xa3:
+ case 0xaf:
+ xf_emit(ctx, 0x400, 0); /* ffffffff */
+ break;
+ }
+ xf_emit(ctx, 4, 0); /* 3f, 0, 0, 0 */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ }
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 1); /* 00000001 RASTERIZE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+}
+
+static void
+nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
+ xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
+ xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
+ xf_emit(ctx, 0x20, 0); /* ffffffff POLYGON_STIPPLE */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x0fac6881);
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ }
+}
+
+static void
+nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 5, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 2, 4); /* 7f, ff */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 3ff */
+ xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
+ xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
+ xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0F8C */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
+ else
+ xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */
+ xf_emit(ctx, 3, 0); /* f, 0, 0 */
+ xf_emit(ctx, 3, 0); /* ffffffff last VIEWPORT_SCALE? */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+ xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_TRANSLATE */
+ xf_emit(ctx, 3, 0); /* f, 0, 0 */
+ xf_emit(ctx, 3, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 2, 0x88); /* 000001ff tesla UNK19D8 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+ xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ }
+ xf_emit(ctx, 0x20, 0); /* 10xbits ffffffff, 3fffff. SCISSOR_* */
+ xf_emit(ctx, 1, 0); /* f */
+ xf_emit(ctx, 1, 0); /* 0? */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 003fffff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+}
+
+static void
+nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
+ xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+ xf_emit(ctx, 1, 0); /* 0000ffff */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
+ /* SEEK */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+ xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
+ xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
+ xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */
+}
+
+static void
+nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
+{
+ /* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000007 UNK0FB4 */
+ /* SEEK */
+ xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_HORIZ */
+ xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_VERT */
+ xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+ xf_emit(ctx, 2, 0x04000000); /* 07ffffff UNK1508 */
+ xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
+ xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_WIDTH */
+ xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ID */
+ xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff CLIPID_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_HEIGHT */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_CLIPID */
+}
+
+static void
+nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 0x33, 0);
+ /* SEEK */
+ xf_emit(ctx, 2, 0);
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 4, 0); /* RO */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+ xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
+
+ xf_emit(ctx, 4, 0); /* RO */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+ xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
+ } else {
+ xf_emit(ctx, 0xc, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+
+ /* SEEK */
+ xf_emit(ctx, 0xc, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
+ xf_emit(ctx, 1, 0); /* 1ff */
+ xf_emit(ctx, 8, 0); /* 0? */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 1); /* 00000001 */
+ /* SEEK */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 2, 4); /* 000000ff */
+ xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM_ID */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 1); /* 00000001 */
+ for (i = 0; i < 10; i++) {
+ /* SEEK */
+ xf_emit(ctx, 0x40, 0); /* ffffffff */
+ xf_emit(ctx, 0x10, 0); /* 3, 0, 0.... */
+ xf_emit(ctx, 0x10, 0); /* ffffffff */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_CTRL */
+ xf_emit(ctx, 1, 1); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
+ xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 000003ff */
+}
+
+static void
+nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int acnt = 0x10, rep, i;
+ /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
+ if (IS_NVA3F(dev_priv->chipset))
+ acnt = 0x20;
+ /* SEEK */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */
+ xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */
+ xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */
+ xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */
+ xf_emit(ctx, 1, 0); /* 000000ff turing UNK370??? */
+ xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0xb, 0); /* RO */
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0x9, 0); /* RO */
+ else
+ xf_emit(ctx, 0x8, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ /* SEEK */
+ xf_emit(ctx, 0xc, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 7f/ff */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */
+ xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
+ else
+ xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
+ if (dev_priv->chipset == 0xa8)
+ xf_emit(ctx, 1, 0x1e00); /* 7fff */
+ /* SEEK */
+ xf_emit(ctx, 0xc, 0); /* RO or close */
+ /* SEEK */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 2, 0); /* ffffffff */
+ else
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 0x10, 0); /* 0? */
+ xf_emit(ctx, 2, 0); /* weird... */
+ xf_emit(ctx, 2, 0); /* RO */
+ } else {
+ xf_emit(ctx, 8, 0); /* 0? */
+ xf_emit(ctx, 1, 0); /* weird... */
+ xf_emit(ctx, 2, 0); /* RO */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */
+ xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */
+ xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
+ xf_emit(ctx, 1, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
+ xf_emit(ctx, 1, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* RO */
+ xf_emit(ctx, 2, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? */
+ xf_emit(ctx, 1, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, acnt, 0); /* f */
+ xf_emit(ctx, 3, 0); /* f/1f */
+ }
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 2, 0); /* RO */
+ else
+ xf_emit(ctx, 5, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */
+ /* SEEK */
+ if (dev_priv->chipset < 0xa0) {
+ xf_emit(ctx, 0x41, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0x11, 0); /* RO */
+ } else if (!IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x50, 0); /* RO */
+ else
+ xf_emit(ctx, 0x58, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, 1, 1); /* 1 UNK0DEC */
+ /* SEEK */
+ xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */
+ xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x1d, 0); /* RO */
+ else
+ xf_emit(ctx, 0x16, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ /* SEEK */
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 8, 0); /* RO */
+ else if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0xc, 0); /* RO */
+ else
+ xf_emit(ctx, 7, 0); /* RO */
+ /* SEEK */
+ xf_emit(ctx, 0xa, 0); /* RO */
+ if (dev_priv->chipset == 0xa0)
+ rep = 0xc;
+ else
+ rep = 4;
+ for (i = 0; i < rep; i++) {
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x20, 0); /* ffffffff */
+ xf_emit(ctx, 0x200, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ }
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 113/111 */
+ xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
+ xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
+ xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 7, 0); /* weird... */
+ else
+ xf_emit(ctx, 5, 0); /* weird... */
+}
+
+static void
+nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
+ xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
+ xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
+ if (dev_priv->chipset < 0xa0) {
+ /* this is useless on everything but the original NV50,
+ * guess they forgot to nuke it. Or just didn't bother. */
+ xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
+ xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */
+ xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */
+ }
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
+ xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */
+ xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */
+ xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */
+ xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */
+ xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */
+ xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */
+ xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
+ xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */
+ xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */
+ xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */
+ xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */
+ xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */
+ xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK888 */
+ xf_emit(ctx, 1, 4); /* 0000003f UNK884 */
+ xf_emit(ctx, 1, 0); /* 00000007 UNK880 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */
+ xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */
+ xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK260 */
+ xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */
+ /* SEEK */
+ xf_emit(ctx, 0x10, 0);
+ /* SEEK */
+ xf_emit(ctx, 0x27, 0);
+}
+
+static void
+nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
+ /* SEEK */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */
+ xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */
+ xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */
+ xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */
+ xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */
+ xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */
+ xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
+ xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
+ xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
+ xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
+ /* SEEK */
+ xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x80, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */
+ break;
+ case 0x84:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x60, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
+ break;
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x40, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */
+ break;
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
+ xf_emit(ctx, 0x10, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xa0:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0xf0, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xa3:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x60, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xa5:
+ case 0xaf:
+ xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
+ xf_emit(ctx, 0x30, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */
+ break;
+ case 0xaa:
+ xf_emit(ctx, 0x12, 0);
+ break;
+ case 0xa8:
+ case 0xac:
+ xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
+ xf_emit(ctx, 0x10, 0); /* fff */
+ xf_emit(ctx, 2, 0); /* ff, fff */
+ xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
+ break;
+ }
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000000 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 0000001f */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 4, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 000000ff */
+}
+
+static void
+nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
+ xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */
+ xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
+ xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */
+ xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
+ xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */
+ xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
+ xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0); /* 3ff */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
+ }
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
+ xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
+ xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+ xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x1c, 0); /* RO */
+ else if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x9, 0);
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+ xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
+ xf_emit(ctx, 1, 0); /* 3ff */
+ }
+ /* XXX: the following block could belong either to unk1cxx, or
+ * to STRMOUT. Rather hard to tell. */
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x25, 0);
+ else
+ xf_emit(ctx, 0x3b, 0);
+}
+
+static void
+nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
+ xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
+ xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
+ }
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
+ else
+ xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
+ xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
+ xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
+ xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
+ xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
+ xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
+ }
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
+ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
+ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */
+ xf_emit(ctx, 2, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ /* SEEK */
+ xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000000? */
+ xf_emit(ctx, 2, 0); /* ffffffff */
+}
+
+static void
+nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+}
+
+static void
+nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 2, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
+ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 7 */
+ /* SEEK */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
+ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
+ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+}
+
+static void
+nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic2;
+ if (dev_priv->chipset == 0x50) {
+ magic2 = 0x00003e60;
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
+ magic2 = 0x001ffe67;
+ } else {
+ magic2 = 0x00087e67;
+ }
+ xf_emit(ctx, 1, 0); /* f/7 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
+ xf_emit(ctx, 1, 0x15); /* 000000ff */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */
+ xf_emit(ctx, 1, 4); /* 7 */
+ xf_emit(ctx, 1, 0x400); /* fffffff */
+ xf_emit(ctx, 1, 0x300); /* ffff */
+ xf_emit(ctx, 1, 0x1001); /* 1fff */
+ if (dev_priv->chipset != 0xa0) {
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */
+ else
+ xf_emit(ctx, 1, 0x15); /* ff */
+ }
+ }
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
+ xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
+ xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
+ xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1001);
+ xf_emit(ctx, 0xb, 0);
+ } else {
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ }
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
+ xf_emit(ctx, 1, 0); /* 000000ff */
+ }
+ xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 0000000f */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ } else if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ } else {
+ xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ }
+ xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
+ xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
+ xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 2, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
+ }
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */
+ xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */
+ xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */
+ xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */
+ xf_emit(ctx, 1, 0); /* 000000ff ROP */
+ xf_emit(ctx, 1, 0); /* ffffffff BETA1 */
+ xf_emit(ctx, 1, 0); /* ffffffff BETA4 */
+ xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
+ xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */
+}
+
+static void
+nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic3;
+ switch (dev_priv->chipset) {
+ case 0x50:
+ magic3 = 0x1000;
+ break;
+ case 0x86:
+ case 0x98:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
+ magic3 = 0x1e00;
+ break;
+ default:
+ magic3 = 0;
+ }
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x1f, 0); /* ffffffff */
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0x0f, 0); /* ffffffff */
+ else
+ xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
+ xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x03020100); /* ffffffff */
+ else
+ xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ if (magic3)
+ xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */
+ xf_emit(ctx, 1, 0); /* 0000001f */
+ xf_emit(ctx, 1, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */
+ xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+ if (magic3)
+ xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */
+ xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ xf_emit(ctx, 1, 0); /* 111/113 */
+ if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
+ xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
+ else if (!IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x210, 0); /* ffffffff */
+ else
+ xf_emit(ctx, 0x410, 0); /* ffffffff */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
+ xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
+ xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+}
+
+static void
+nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic1, magic2;
+ if (dev_priv->chipset == 0x50) {
+ magic1 = 0x3ff;
+ magic2 = 0x00003e60;
+ } else if (!IS_NVA3F(dev_priv->chipset)) {
+ magic1 = 0x7ff;
+ magic2 = 0x001ffe67;
+ } else {
+ magic1 = 0x7ff;
+ magic2 = 0x00087e67;
+ }
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
+ xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */
+ xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */
+ xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */
+ xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
+ } else if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ } else {
+ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
+ }
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_ALPHA */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ }
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
+ xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0); /* ff */
+ else
+ xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
+ xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
+ xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */
+ xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */
+ xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
+ xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */
+ xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */
+ xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
+ xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */
+ xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
+ xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */
+ xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
+ xf_emit(ctx, 1, 0); /* 0000ffff */
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 2, 0); /* ffff, ff/3ff */
+ xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */
+ xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */
+ xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */
+ xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */
+ xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 0); /* 00000001 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
+ xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
+ xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
+ xf_emit(ctx, 1, 0); /* 7 */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ }
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
+ xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
+ xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
+ xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
+ xf_emit(ctx, 1, 0xfac6881); /* fffffff */
+ xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
+ xf_emit(ctx, 1, 4); /* 7 */
+ xf_emit(ctx, 1, 0); /* 1 */
+ xf_emit(ctx, 2, 1); /* 1 */
+ xf_emit(ctx, 2, 0); /* 7, f */
+ xf_emit(ctx, 1, 1); /* 1 */
+ xf_emit(ctx, 1, 0); /* 7/f */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 0x9, 0); /* 1 */
+ else
+ xf_emit(ctx, 0x8, 0); /* 1 */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 8, 1); /* 1 */
+ xf_emit(ctx, 1, 0x11); /* 7f */
+ xf_emit(ctx, 7, 0); /* 7f */
+ xf_emit(ctx, 1, 0xfac6881); /* fffffff */
+ xf_emit(ctx, 1, 0xf); /* f */
+ xf_emit(ctx, 7, 0); /* f */
+ xf_emit(ctx, 1, 0x11); /* 7f */
+ xf_emit(ctx, 1, 1); /* 1 */
+ xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ }
+ }
+}
+
+static void
+nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0); /* 3 */
+ xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
+ xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
+ xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
+ xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
+ else
+ xf_emit(ctx, 2, 0); /* 3ff, 1 */
+ xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */
+ xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */
+ xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */
+ xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */
+ xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */
+ xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
+ xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
+ xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
+ if (dev_priv->chipset == 0x50) {
+ xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
+ xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
+ xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
+ } else if (!IS_NVAAF(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */
+ xf_emit(ctx, 1, 0); /* 00000003 */
+ xf_emit(ctx, 1, 0); /* 000003ff */
+ } else {
+ xf_emit(ctx, 0x6, 0);
+ }
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */
+ xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */
+}
+
+static void
+nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
+ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
+ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
+ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
+ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
+ xf_emit(ctx, 1, 0); /* ffff0ff3 */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
+ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
+ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
+ xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */
+ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
+ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
+}
+
+static void
+nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ if (dev_priv->chipset < 0xa0) {
+ nv50_graph_construct_xfer_unk84xx(ctx);
+ nv50_graph_construct_xfer_tprop(ctx);
+ nv50_graph_construct_xfer_tex(ctx);
+ nv50_graph_construct_xfer_unk8cxx(ctx);
+ } else {
+ nv50_graph_construct_xfer_tex(ctx);
+ nv50_graph_construct_xfer_tprop(ctx);
+ nv50_graph_construct_xfer_unk8cxx(ctx);
+ nv50_graph_construct_xfer_unk84xx(ctx);
+ }
+}
+
+static void
+nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i, mpcnt = 2;
+ switch (dev_priv->chipset) {
+ case 0x98:
+ case 0xaa:
+ mpcnt = 1;
+ break;
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0xa8:
+ case 0xac:
+ mpcnt = 2;
+ break;
+ case 0xa0:
+ case 0xa3:
+ case 0xa5:
+ case 0xaf:
+ mpcnt = 3;
+ break;
+ }
+ for (i = 0; i < mpcnt; i++) {
+ xf_emit(ctx, 1, 0); /* ff */
+ xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
+ xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
+ xf_emit(ctx, 1, 0x04000400); /* ffffffff */
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
+ xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0xe00); /* 7fff */
+ xf_emit(ctx, 1, 0x1e00); /* 7fff */
+ }
+ xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
+ xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
+ xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
+ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
+ if (IS_NVAAF(dev_priv->chipset))
+ xf_emit(ctx, 0xb, 0); /* RO */
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0xc, 0); /* RO */
+ else
+ xf_emit(ctx, 0xa, 0); /* RO */
+ }
+ xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
+ }
+ xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
+ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
+ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
+ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
+ xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
+ xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
+ xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
+ xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
+ xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
+ xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNK0FAC */
+ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
+ xf_emit(ctx, 1, 0); /* ff/3ff */
+ xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
+ xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */
+ xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
+ xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
+ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
+ xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */
+ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
+ xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
+ xf_emit(ctx, 1, 0); /* 00000007 */
+ xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
+ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
+ if (IS_NVA3F(dev_priv->chipset))
+ xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
+ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
+ xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
+ xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */
+ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
+ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
+ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
+ if (IS_NVA3F(dev_priv->chipset)) {
+ xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
+ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
+ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
+ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
+ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
+ }
+ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
+ xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
+ /* XXX: demagic this part some day */
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x3a0, 0);
+ else if (dev_priv->chipset < 0x94)
+ xf_emit(ctx, 0x3a2, 0);
+ else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+ xf_emit(ctx, 0x39f, 0);
+ else
+ xf_emit(ctx, 0x3a3, 0);
+ xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
+ xf_emit(ctx, 1, 0); /* 7 OPERATION */
+ xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */
+ xf_emit(ctx, 0x2d, 0);
+}
+
+static void
+nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ uint32_t offset;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ int size = 0;
+
+ offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
+
+ if (dev_priv->chipset < 0xa0) {
+ for (i = 0; i < 8; i++) {
+ ctx->ctxvals_pos = offset + i;
+ /* that little bugger belongs to csched. No idea
+ * what it's doing here. */
+ if (i == 0)
+ xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
+ if (units & (1 << i))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ } else {
+ /* Strand 0: TPs 0, 1 */
+ ctx->ctxvals_pos = offset;
+ /* that little bugger belongs to csched. No idea
+ * what it's doing here. */
+ xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
+ if (units & (1 << 0))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if (units & (1 << 1))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1: TPs 2, 3 */
+ ctx->ctxvals_pos = offset + 1;
+ if (units & (1 << 2))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if (units & (1 << 3))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2: TPs 4, 5, 6 */
+ ctx->ctxvals_pos = offset + 2;
+ if (units & (1 << 4))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if (units & (1 << 5))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if (units & (1 << 6))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3: TPs 7, 8, 9 */
+ ctx->ctxvals_pos = offset + 3;
+ if (units & (1 << 7))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if (units & (1 << 8))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if (units & (1 << 9))
+ nv50_graph_construct_xfer_mpc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ ctx->ctxvals_pos = offset + size * 8;
+ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ cp_lsr (ctx, offset);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, size);
+ cp_out (ctx, CP_SEEK_2);
+ cp_out (ctx, CP_XFER_2);
+ cp_wait(ctx, XFER, BUSY);
+}
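
As an aside, the strand bookkeeping in nv50_graph_construct_xfer2() above can be summarised with a small standalone sketch. It assumes, purely from the offset/size arithmetic in the hunk (start positions offset+0..offset+7, per-strand sizes computed as (ctxvals_pos-offset)/8, final position offset+size*8 rounded up to a 0x40-entry boundary), that the eight transfer strands are interleaved with a stride of eight words; the per-strand counts and names below are illustrative, not taken from the driver.

```c
#include <stdint.h>
#include <stdio.h>

#define NUM_STRANDS 8

int main(void)
{
	/* hypothetical 0x40-aligned base, like the rounded ctxvals_pos */
	uint32_t offset = 0x40;
	/* illustrative number of values emitted on each strand */
	int per_strand[NUM_STRANDS] = { 6, 6, 9, 9, 0, 0, 0, 0 };
	uint32_t pos, end;
	int s, size = 0;

	for (s = 0; s < NUM_STRANDS; s++) {
		/* start of strand s, cf. ctx->ctxvals_pos = offset + s */
		pos = offset + s;
		/* assume every emitted value advances the position by one stride */
		pos += per_strand[s] * NUM_STRANDS;
		/* track the longest strand, cf. (ctxvals_pos - offset) / 8 */
		if ((int)((pos - offset) / NUM_STRANDS) > size)
			size = (pos - offset) / NUM_STRANDS;
	}

	/* region consumed by all strands, realigned to a 0x40-entry boundary */
	end = offset + size * NUM_STRANDS;
	end = (end + 0x3f) & ~0x3f;
	printf("per-strand size %d, region [0x%x, 0x%x)\n", size, offset, end);
	return 0;
}
```

Under that reading, the trailing cp_lsr(ctx, offset)/CP_SET_XFER_POINTER pair appears to program the shared base and cp_lsr(ctx, size)/CP_SEEK_2 the per-strand length before CP_XFER_2 is issued and waited on.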
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <core/mm.h>
+#include "nvc0.h"
+
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
+ nv_wr32(dev, 0x400204, data);
+ nv_wr32(dev, 0x400200, icmd);
+ /* bit 1 presumably flags the pushed icmd as still pending; spin until it clears */
+ while (nv_rd32(dev, 0x400700) & 2) {}
+}
+
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
+ nv_wr32(dev, 0x40448c, data);
+ nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
+static void
+nvc0_grctx_generate_9097(struct drm_device *dev)
+{
+ u32 fermi = nvc0_graph_class(dev);
+ u32 mthd;
+
+ nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
+ nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
+ nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
+ nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
+ nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
+ nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
+ nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
+ nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
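+ /* methods 0x3400-0x35fc are written against whichever 3D class is the
+ * newest one supported (see the 0x9197/0x9297 variants further down) */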
+ if (fermi == 0x9097) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(dev, 0x9097, mthd, 0x00000000);
+ }
+ nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
+ nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
+ nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
+ nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
+ nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
+ nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
+ nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
+ nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
+ nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
+ nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
+ nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
+ nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
+ nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
+ nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
+ nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
+ nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
+ nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
+ nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
+ nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
+ nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
+ nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
+ nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
+ nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
+ nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
+ nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
+ nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
+ nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
+ nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
+ nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
+ nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
+ nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
+ nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
+ nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
+ nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
+ nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
+ nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
+ nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
+ nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
+ nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
+ nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
+ nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
+ nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
+ nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
+ nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
+ nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
+ nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
+ nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
+ nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
+ nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
+ nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
+ nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
+ nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
+ nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
+ nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
+ nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
+ nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
+ nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
+ nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
+ nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
+ nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
+ nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
+ nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
+ /* in the trace this write appears right after the 0x90c0 methods, not here */
+ nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
+}
+
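+/* extra default state for the FERMI_B (0x9197) 3D class variant */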
+static void
+nvc0_grctx_generate_9197(struct drm_device *dev)
+{
+ u32 fermi = nvc0_graph_class(dev);
+ u32 mthd;
+
+ if (fermi == 0x9197) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(dev, 0x9197, mthd, 0x00000000);
+ }
+ nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
+}
+
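+/* extra default state for the FERMI_C (0x9297) 3D class variant */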
+static void
+nvc0_grctx_generate_9297(struct drm_device *dev)
+{
+ u32 fermi = nvc0_graph_class(dev);
+ u32 mthd;
+
+ if (fermi == 0x9297) {
+ for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
+ nv_mthd(dev, 0x9297, mthd, 0x00000000);
+ }
+ nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
+ nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
+}
+
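+/* default state for the Fermi 2D class (0x902d) */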
+static void
+nvc0_grctx_generate_902d(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+}
+
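+/* default state for the M2MF (memory-to-memory format) class, 0x9039 */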
+static void
+nvc0_grctx_generate_9039(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
+ nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
+}
+
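+/* default state for the Fermi compute class (0x90c0) */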
+static void
+nvc0_grctx_generate_90c0(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
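+ /* these methods are only written for the 0xd9 chipset */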
+ for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
+ }
+ nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
+ for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
+ nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
+ nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
+ nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
+ nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
+ }
+ nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
+ nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
+ nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
+ nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
+}
+
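+/* initial register state for the PGRAPH dispatch unit (0x4040xx/0x4041xx) */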
+static void
+nvc0_grctx_generate_dispatch(struct drm_device *dev)
+{
+ int i;
+
+ nv_wr32(dev, 0x404004, 0x00000000);
+ nv_wr32(dev, 0x404008, 0x00000000);
+ nv_wr32(dev, 0x40400c, 0x00000000);
+ nv_wr32(dev, 0x404010, 0x00000000);
+ nv_wr32(dev, 0x404014, 0x00000000);
+ nv_wr32(dev, 0x404018, 0x00000000);
+ nv_wr32(dev, 0x40401c, 0x00000000);
+ nv_wr32(dev, 0x404020, 0x00000000);
+ nv_wr32(dev, 0x404024, 0x00000000);
+ nv_wr32(dev, 0x404028, 0x00000000);
+ nv_wr32(dev, 0x40402c, 0x00000000);
+ nv_wr32(dev, 0x404044, 0x00000000);
+ nv_wr32(dev, 0x404094, 0x00000000);
+ nv_wr32(dev, 0x404098, 0x00000000);
+ nv_wr32(dev, 0x40409c, 0x00000000);
+ nv_wr32(dev, 0x4040a0, 0x00000000);
+ nv_wr32(dev, 0x4040a4, 0x00000000);
+ nv_wr32(dev, 0x4040a8, 0x00000000);
+ nv_wr32(dev, 0x4040ac, 0x00000000);
+ nv_wr32(dev, 0x4040b0, 0x00000000);
+ nv_wr32(dev, 0x4040b4, 0x00000000);
+ nv_wr32(dev, 0x4040b8, 0x00000000);
+ nv_wr32(dev, 0x4040bc, 0x00000000);
+ nv_wr32(dev, 0x4040c0, 0x00000000);
+ nv_wr32(dev, 0x4040c4, 0x00000000);
+ nv_wr32(dev, 0x4040c8, 0xf0000087);
+ nv_wr32(dev, 0x4040d4, 0x00000000);
+ nv_wr32(dev, 0x4040d8, 0x00000000);
+ nv_wr32(dev, 0x4040dc, 0x00000000);
+ nv_wr32(dev, 0x4040e0, 0x00000000);
+ nv_wr32(dev, 0x4040e4, 0x00000000);
+ nv_wr32(dev, 0x4040e8, 0x00001000);
+ nv_wr32(dev, 0x4040f8, 0x00000000);
+ nv_wr32(dev, 0x404130, 0x00000000);
+ nv_wr32(dev, 0x404134, 0x00000000);
+ nv_wr32(dev, 0x404138, 0x20000040);
+ nv_wr32(dev, 0x404150, 0x0000002e);
+ nv_wr32(dev, 0x404154, 0x00000400);
+ nv_wr32(dev, 0x404158, 0x00000200);
+ nv_wr32(dev, 0x404164, 0x00000055);
+ nv_wr32(dev, 0x404168, 0x00000000);
+ nv_wr32(dev, 0x404174, 0x00000000);
+ nv_wr32(dev, 0x404178, 0x00000000);
+ nv_wr32(dev, 0x40417c, 0x00000000);
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
+}
+
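+/* initial register state for the macro (MME) unit at 0x4044xx */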
+static void
+nvc0_grctx_generate_macro(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404404, 0x00000000);
+ nv_wr32(dev, 0x404408, 0x00000000);
+ nv_wr32(dev, 0x40440c, 0x00000000);
+ nv_wr32(dev, 0x404410, 0x00000000);
+ nv_wr32(dev, 0x404414, 0x00000000);
+ nv_wr32(dev, 0x404418, 0x00000000);
+ nv_wr32(dev, 0x40441c, 0x00000000);
+ nv_wr32(dev, 0x404420, 0x00000000);
+ nv_wr32(dev, 0x404424, 0x00000000);
+ nv_wr32(dev, 0x404428, 0x00000000);
+ nv_wr32(dev, 0x40442c, 0x00000000);
+ nv_wr32(dev, 0x404430, 0x00000000);
+ nv_wr32(dev, 0x404434, 0x00000000);
+ nv_wr32(dev, 0x404438, 0x00000000);
+ nv_wr32(dev, 0x404460, 0x00000000);
+ nv_wr32(dev, 0x404464, 0x00000000);
+ nv_wr32(dev, 0x404468, 0x00ffffff);
+ nv_wr32(dev, 0x40446c, 0x00000000);
+ nv_wr32(dev, 0x404480, 0x00000001);
+ nv_wr32(dev, 0x404498, 0x00000001);
+}
+
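+/* initial register state for the M2MF engine block at 0x4046xx */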
+static void
+nvc0_grctx_generate_m2mf(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404604, 0x00000015);
+ nv_wr32(dev, 0x404608, 0x00000000);
+ nv_wr32(dev, 0x40460c, 0x00002e00);
+ nv_wr32(dev, 0x404610, 0x00000100);
+ nv_wr32(dev, 0x404618, 0x00000000);
+ nv_wr32(dev, 0x40461c, 0x00000000);
+ nv_wr32(dev, 0x404620, 0x00000000);
+ nv_wr32(dev, 0x404624, 0x00000000);
+ nv_wr32(dev, 0x404628, 0x00000000);
+ nv_wr32(dev, 0x40462c, 0x00000000);
+ nv_wr32(dev, 0x404630, 0x00000000);
+ nv_wr32(dev, 0x404634, 0x00000000);
+ nv_wr32(dev, 0x404638, 0x00000004);
+ nv_wr32(dev, 0x40463c, 0x00000000);
+ nv_wr32(dev, 0x404640, 0x00000000);
+ nv_wr32(dev, 0x404644, 0x00000000);
+ nv_wr32(dev, 0x404648, 0x00000000);
+ nv_wr32(dev, 0x40464c, 0x00000000);
+ nv_wr32(dev, 0x404650, 0x00000000);
+ nv_wr32(dev, 0x404654, 0x00000000);
+ nv_wr32(dev, 0x404658, 0x00000000);
+ nv_wr32(dev, 0x40465c, 0x007f0100);
+ nv_wr32(dev, 0x404660, 0x00000000);
+ nv_wr32(dev, 0x404664, 0x00000000);
+ nv_wr32(dev, 0x404668, 0x00000000);
+ nv_wr32(dev, 0x40466c, 0x00000000);
+ nv_wr32(dev, 0x404670, 0x00000000);
+ nv_wr32(dev, 0x404674, 0x00000000);
+ nv_wr32(dev, 0x404678, 0x00000000);
+ nv_wr32(dev, 0x40467c, 0x00000002);
+ nv_wr32(dev, 0x404680, 0x00000000);
+ nv_wr32(dev, 0x404684, 0x00000000);
+ nv_wr32(dev, 0x404688, 0x00000000);
+ nv_wr32(dev, 0x40468c, 0x00000000);
+ nv_wr32(dev, 0x404690, 0x00000000);
+ nv_wr32(dev, 0x404694, 0x00000000);
+ nv_wr32(dev, 0x404698, 0x00000000);
+ nv_wr32(dev, 0x40469c, 0x00000000);
+ nv_wr32(dev, 0x4046a0, 0x007f0080);
+ nv_wr32(dev, 0x4046a4, 0x00000000);
+ nv_wr32(dev, 0x4046a8, 0x00000000);
+ nv_wr32(dev, 0x4046ac, 0x00000000);
+ nv_wr32(dev, 0x4046b0, 0x00000000);
+ nv_wr32(dev, 0x4046b4, 0x00000000);
+ nv_wr32(dev, 0x4046b8, 0x00000000);
+ nv_wr32(dev, 0x4046bc, 0x00000000);
+ nv_wr32(dev, 0x4046c0, 0x00000000);
+ nv_wr32(dev, 0x4046c4, 0x00000000);
+ nv_wr32(dev, 0x4046c8, 0x00000000);
+ nv_wr32(dev, 0x4046cc, 0x00000000);
+ nv_wr32(dev, 0x4046d0, 0x00000000);
+ nv_wr32(dev, 0x4046d4, 0x00000000);
+ nv_wr32(dev, 0x4046d8, 0x00000000);
+ nv_wr32(dev, 0x4046dc, 0x00000000);
+ nv_wr32(dev, 0x4046e0, 0x00000000);
+ nv_wr32(dev, 0x4046e4, 0x00000000);
+ nv_wr32(dev, 0x4046e8, 0x00000000);
+ nv_wr32(dev, 0x4046f0, 0x00000000);
+ nv_wr32(dev, 0x4046f4, 0x00000000);
+}
+
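+/* initial state for the not-yet-identified register block at 0x4047xx */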
+static void
+nvc0_grctx_generate_unk47xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404700, 0x00000000);
+ nv_wr32(dev, 0x404704, 0x00000000);
+ nv_wr32(dev, 0x404708, 0x00000000);
+ nv_wr32(dev, 0x40470c, 0x00000000);
+ nv_wr32(dev, 0x404710, 0x00000000);
+ nv_wr32(dev, 0x404714, 0x00000000);
+ nv_wr32(dev, 0x404718, 0x00000000);
+ nv_wr32(dev, 0x40471c, 0x00000000);
+ nv_wr32(dev, 0x404720, 0x00000000);
+ nv_wr32(dev, 0x404724, 0x00000000);
+ nv_wr32(dev, 0x404728, 0x00000000);
+ nv_wr32(dev, 0x40472c, 0x00000000);
+ nv_wr32(dev, 0x404730, 0x00000000);
+ nv_wr32(dev, 0x404734, 0x00000100);
+ nv_wr32(dev, 0x404738, 0x00000000);
+ nv_wr32(dev, 0x40473c, 0x00000000);
+ nv_wr32(dev, 0x404740, 0x00000000);
+ nv_wr32(dev, 0x404744, 0x00000000);
+ nv_wr32(dev, 0x404748, 0x00000000);
+ nv_wr32(dev, 0x40474c, 0x00000000);
+ nv_wr32(dev, 0x404750, 0x00000000);
+ nv_wr32(dev, 0x404754, 0x00000000);
+}
+
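+/* shader-related setup at 0x4058xx/0x405axx, with 0xc1/0xd9 variations */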
+static void
+nvc0_grctx_generate_shaders(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset == 0xd9) {
+ nv_wr32(dev, 0x405800, 0x0f8000bf);
+ nv_wr32(dev, 0x405830, 0x02180218);
+ nv_wr32(dev, 0x405834, 0x08000000);
+ } else
+ if (dev_priv->chipset == 0xc1) {
+ nv_wr32(dev, 0x405800, 0x0f8000bf);
+ nv_wr32(dev, 0x405830, 0x02180218);
+ nv_wr32(dev, 0x405834, 0x00000000);
+ } else {
+ nv_wr32(dev, 0x405800, 0x078000bf);
+ nv_wr32(dev, 0x405830, 0x02180000);
+ nv_wr32(dev, 0x405834, 0x00000000);
+ }
+ nv_wr32(dev, 0x405838, 0x00000000);
+ nv_wr32(dev, 0x405854, 0x00000000);
+ nv_wr32(dev, 0x405870, 0x00000001);
+ nv_wr32(dev, 0x405874, 0x00000001);
+ nv_wr32(dev, 0x405878, 0x00000001);
+ nv_wr32(dev, 0x40587c, 0x00000001);
+ nv_wr32(dev, 0x405a00, 0x00000000);
+ nv_wr32(dev, 0x405a04, 0x00000000);
+ nv_wr32(dev, 0x405a18, 0x00000000);
+}
+
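+/* initial state for the not-yet-identified register block at 0x4060xx */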
+static void
+nvc0_grctx_generate_unk60xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x406020, 0x000103c1);
+ nv_wr32(dev, 0x406028, 0x00000001);
+ nv_wr32(dev, 0x40602c, 0x00000001);
+ nv_wr32(dev, 0x406030, 0x00000001);
+ nv_wr32(dev, 0x406034, 0x00000001);
+}
+
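+/* initial state for the not-yet-identified 0x4064xx block; 0xc1/0xd9 take extra writes */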
+static void
+nvc0_grctx_generate_unk64xx(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, 0x4064a8, 0x00000000);
+ nv_wr32(dev, 0x4064ac, 0x00003fff);
+ nv_wr32(dev, 0x4064b4, 0x00000000);
+ nv_wr32(dev, 0x4064b8, 0x00000000);
+ if (dev_priv->chipset == 0xd9)
+ nv_wr32(dev, 0x4064bc, 0x00000000);
+ if (dev_priv->chipset == 0xc1 ||
+ dev_priv->chipset == 0xd9) {
+ nv_wr32(dev, 0x4064c0, 0x80140078);
+ nv_wr32(dev, 0x4064c4, 0x0086ffff);
+ }
+}
+
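+/* TP bus setup at 0x4078xx */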
+static void
+nvc0_grctx_generate_tpbus(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407804, 0x00000023);
+ nv_wr32(dev, 0x40780c, 0x0a418820);
+ nv_wr32(dev, 0x407810, 0x062080e6);
+ nv_wr32(dev, 0x407814, 0x020398a4);
+ nv_wr32(dev, 0x407818, 0x0e629062);
+ nv_wr32(dev, 0x40781c, 0x0a418820);
+ nv_wr32(dev, 0x407820, 0x000000e6);
+ nv_wr32(dev, 0x4078bc, 0x00000103);
+}
+
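+/* CCACHE unit setup at 0x4080xx */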
+static void
+nvc0_grctx_generate_ccache(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408000, 0x00000000);
+ nv_wr32(dev, 0x408004, 0x00000000);
+ nv_wr32(dev, 0x408008, 0x00000018);
+ nv_wr32(dev, 0x40800c, 0x00000000);
+ nv_wr32(dev, 0x408010, 0x00000000);
+ nv_wr32(dev, 0x408014, 0x00000069);
+ nv_wr32(dev, 0x408018, 0xe100e100);
+ nv_wr32(dev, 0x408064, 0x00000000);
+}
+
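+/* per-ROP defaults written through the ROP broadcast area; several values are chipset-specific */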
+static void
+nvc0_grctx_generate_rop(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chipset = dev_priv->chipset;
+
+ /* ROPC_BROADCAST */
+ nv_wr32(dev, 0x408800, 0x02802a3c);
+ nv_wr32(dev, 0x408804, 0x00000040);
+ if (chipset == 0xd9) {
+ nv_wr32(dev, 0x408808, 0x1043e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x1043e005);
+ nv_wr32(dev, 0x408908, 0x00c8102f);
+ } else
+ if (chipset == 0xc1) {
+ nv_wr32(dev, 0x408808, 0x1003e005);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x62000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ } else {
+ nv_wr32(dev, 0x408808, 0x0003e00d);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x02000001);
+ nv_wr32(dev, 0x408908, 0x00c80929);
+ }
+ nv_wr32(dev, 0x40890c, 0x00000000);
+ nv_wr32(dev, 0x408980, 0x0000011d);
+}
+
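+/* per-GPC defaults written through the GPC broadcast area (0x418xxx) */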
+static void
+nvc0_grctx_generate_gpc(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chipset = dev_priv->chipset;
+ int i;
+
+ /* GPC_BROADCAST */
+ nv_wr32(dev, 0x418380, 0x00000016);
+ nv_wr32(dev, 0x418400, 0x38004e00);
+ nv_wr32(dev, 0x418404, 0x71e0ffff);
+ nv_wr32(dev, 0x418408, 0x00000000);
+ nv_wr32(dev, 0x41840c, 0x00001008);
+ nv_wr32(dev, 0x418410, 0x0fff0fff);
+ nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
+ nv_wr32(dev, 0x418450, 0x00000000);
+ nv_wr32(dev, 0x418454, 0x00000000);
+ nv_wr32(dev, 0x418458, 0x00000000);
+ nv_wr32(dev, 0x41845c, 0x00000000);
+ nv_wr32(dev, 0x418460, 0x00000000);
+ nv_wr32(dev, 0x418464, 0x00000000);
+ nv_wr32(dev, 0x418468, 0x00000001);
+ nv_wr32(dev, 0x41846c, 0x00000000);
+ nv_wr32(dev, 0x418470, 0x00000000);
+ nv_wr32(dev, 0x418600, 0x0000001f);
+ nv_wr32(dev, 0x418684, 0x0000000f);
+ nv_wr32(dev, 0x418700, 0x00000002);
+ nv_wr32(dev, 0x418704, 0x00000080);
+ nv_wr32(dev, 0x418708, 0x00000000);
+ nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
+ nv_wr32(dev, 0x418710, 0x00000000);
+ nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
+ nv_wr32(dev, 0x418808, 0x00000000);
+ nv_wr32(dev, 0x41880c, 0x00000000);
+ nv_wr32(dev, 0x418810, 0x00000000);
+ nv_wr32(dev, 0x418828, 0x00008442);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x418830, 0x10000001);
+ else
+ nv_wr32(dev, 0x418830, 0x00000001);
+ nv_wr32(dev, 0x4188d8, 0x00000008);
+ nv_wr32(dev, 0x4188e0, 0x01000000);
+ nv_wr32(dev, 0x4188e8, 0x00000000);
+ nv_wr32(dev, 0x4188ec, 0x00000000);
+ nv_wr32(dev, 0x4188f0, 0x00000000);
+ nv_wr32(dev, 0x4188f4, 0x00000000);
+ nv_wr32(dev, 0x4188f8, 0x00000000);
+ if (chipset == 0xd9)
+ nv_wr32(dev, 0x4188fc, 0x20100008);
+ else if (chipset == 0xc1)
+ nv_wr32(dev, 0x4188fc, 0x00100018);
+ else
+ nv_wr32(dev, 0x4188fc, 0x00100000);
+ nv_wr32(dev, 0x41891c, 0x00ff00ff);
+ nv_wr32(dev, 0x418924, 0x00000000);
+ nv_wr32(dev, 0x418928, 0x00ffff00);
+ nv_wr32(dev, 0x41892c, 0x0000ff00);
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
+ nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
+ nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
+ }
+ nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
+ nv_wr32(dev, 0x418b08, 0x0a418820);
+ nv_wr32(dev, 0x418b0c, 0x062080e6);
+ nv_wr32(dev, 0x418b10, 0x020398a4);
+ nv_wr32(dev, 0x418b14, 0x0e629062);
+ nv_wr32(dev, 0x418b18, 0x0a418820);
+ nv_wr32(dev, 0x418b1c, 0x000000e6);
+ nv_wr32(dev, 0x418bb8, 0x00000103);
+ nv_wr32(dev, 0x418c08, 0x00000001);
+ nv_wr32(dev, 0x418c10, 0x00000000);
+ nv_wr32(dev, 0x418c14, 0x00000000);
+ nv_wr32(dev, 0x418c18, 0x00000000);
+ nv_wr32(dev, 0x418c1c, 0x00000000);
+ nv_wr32(dev, 0x418c20, 0x00000000);
+ nv_wr32(dev, 0x418c24, 0x00000000);
+ nv_wr32(dev, 0x418c28, 0x00000000);
+ nv_wr32(dev, 0x418c2c, 0x00000000);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x418c6c, 0x00000001);
+ nv_wr32(dev, 0x418c80, 0x20200004);
+ nv_wr32(dev, 0x418c8c, 0x00000001);
+ nv_wr32(dev, 0x419000, 0x00000780);
+ nv_wr32(dev, 0x419004, 0x00000000);
+ nv_wr32(dev, 0x419008, 0x00000000);
+ nv_wr32(dev, 0x419014, 0x00000004);
+}
+
+static void
+nvc0_grctx_generate_tp(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chipset = dev_priv->chipset;
+
+ /* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419818, 0x00000000);
+ nv_wr32(dev, 0x41983c, 0x00038bc7);
+ nv_wr32(dev, 0x419848, 0x00000000);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419864, 0x00000129);
+ else
+ nv_wr32(dev, 0x419864, 0x0000012a);
+ nv_wr32(dev, 0x419888, 0x00000000);
+ nv_wr32(dev, 0x419a00, 0x000001f0);
+ nv_wr32(dev, 0x419a04, 0x00000001);
+ nv_wr32(dev, 0x419a08, 0x00000023);
+ nv_wr32(dev, 0x419a0c, 0x00020000);
+ nv_wr32(dev, 0x419a10, 0x00000000);
+ nv_wr32(dev, 0x419a14, 0x00000200);
+ nv_wr32(dev, 0x419a1c, 0x00000000);
+ nv_wr32(dev, 0x419a20, 0x00000800);
+ if (chipset == 0xd9)
+ nv_wr32(dev, 0x00419ac4, 0x0017f440);
+ else if (chipset != 0xc0 && chipset != 0xc8)
+ nv_wr32(dev, 0x00419ac4, 0x0007f440);
+ nv_wr32(dev, 0x419b00, 0x0a418820);
+ nv_wr32(dev, 0x419b04, 0x062080e6);
+ nv_wr32(dev, 0x419b08, 0x020398a4);
+ nv_wr32(dev, 0x419b0c, 0x0e629062);
+ nv_wr32(dev, 0x419b10, 0x0a418820);
+ nv_wr32(dev, 0x419b14, 0x000000e6);
+ nv_wr32(dev, 0x419bd0, 0x00900103);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419be0, 0x00400001);
+ else
+ nv_wr32(dev, 0x419be0, 0x00000001);
+ nv_wr32(dev, 0x419be4, 0x00000000);
+ nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
+ nv_wr32(dev, 0x419c04, 0x00000006);
+ nv_wr32(dev, 0x419c08, 0x00000002);
+ nv_wr32(dev, 0x419c20, 0x00000000);
+ if (chipset == 0xd9) {
+ nv_wr32(dev, 0x419c24, 0x00084210);
+ nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
+ nv_wr32(dev, 0x419cb0, 0x00020048);
+ } else
+ if (chipset == 0xce || chipset == 0xcf) {
+ nv_wr32(dev, 0x419cb0, 0x00020048);
+ } else {
+ nv_wr32(dev, 0x419cb0, 0x00060048);
+ }
+ nv_wr32(dev, 0x419ce8, 0x00000000);
+ nv_wr32(dev, 0x419cf4, 0x00000183);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419d20, 0x12180000);
+ else
+ nv_wr32(dev, 0x419d20, 0x02180000);
+ nv_wr32(dev, 0x419d24, 0x00001fff);
+ if (chipset == 0xc1 || chipset == 0xd9)
+ nv_wr32(dev, 0x419d44, 0x02180218);
+ nv_wr32(dev, 0x419e04, 0x00000000);
+ nv_wr32(dev, 0x419e08, 0x00000000);
+ nv_wr32(dev, 0x419e0c, 0x00000000);
+ nv_wr32(dev, 0x419e10, 0x00000002);
+ nv_wr32(dev, 0x419e44, 0x001beff2);
+ nv_wr32(dev, 0x419e48, 0x00000000);
+ nv_wr32(dev, 0x419e4c, 0x0000000f);
+ nv_wr32(dev, 0x419e50, 0x00000000);
+ nv_wr32(dev, 0x419e54, 0x00000000);
+ nv_wr32(dev, 0x419e58, 0x00000000);
+ nv_wr32(dev, 0x419e5c, 0x00000000);
+ nv_wr32(dev, 0x419e60, 0x00000000);
+ nv_wr32(dev, 0x419e64, 0x00000000);
+ nv_wr32(dev, 0x419e68, 0x00000000);
+ nv_wr32(dev, 0x419e6c, 0x00000000);
+ nv_wr32(dev, 0x419e70, 0x00000000);
+ nv_wr32(dev, 0x419e74, 0x00000000);
+ nv_wr32(dev, 0x419e78, 0x00000000);
+ nv_wr32(dev, 0x419e7c, 0x00000000);
+ nv_wr32(dev, 0x419e80, 0x00000000);
+ nv_wr32(dev, 0x419e84, 0x00000000);
+ nv_wr32(dev, 0x419e88, 0x00000000);
+ nv_wr32(dev, 0x419e8c, 0x00000000);
+ nv_wr32(dev, 0x419e90, 0x00000000);
+ nv_wr32(dev, 0x419e98, 0x00000000);
+ if (chipset != 0xc0 && chipset != 0xc8)
+ nv_wr32(dev, 0x419ee0, 0x00011110);
+ nv_wr32(dev, 0x419f50, 0x00000000);
+ nv_wr32(dev, 0x419f54, 0x00000000);
+ if (chipset != 0xc0 && chipset != 0xc8)
+ nv_wr32(dev, 0x419f58, 0x00000000);
+}
+
+int
+nvc0_grctx_generate(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ int i, gpc, tp, id;
+ u32 fermi = nvc0_graph_class(dev);
+ u32 r000260, tmp;
+
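+ /* stash 0x000260 and clear its low bit while the context is generated;
+ * the saved value is restored before returning
+ */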
+ r000260 = nv_rd32(dev, 0x000260);
+ nv_wr32(dev, 0x000260, r000260 & ~1);
+ nv_wr32(dev, 0x400208, 0x00000000);
+
+ nvc0_grctx_generate_dispatch(dev);
+ nvc0_grctx_generate_macro(dev);
+ nvc0_grctx_generate_m2mf(dev);
+ nvc0_grctx_generate_unk47xx(dev);
+ nvc0_grctx_generate_shaders(dev);
+ nvc0_grctx_generate_unk60xx(dev);
+ nvc0_grctx_generate_unk64xx(dev);
+ nvc0_grctx_generate_tpbus(dev);
+ nvc0_grctx_generate_ccache(dev);
+ nvc0_grctx_generate_rop(dev);
+ nvc0_grctx_generate_gpc(dev);
+ nvc0_grctx_generate_tp(dev);
+
+ nv_wr32(dev, 0x404154, 0x00000000);
+
+ /* fuc "mmio list" writes */
+ for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+ u32 reg = nv_ro32(grch->mmio, i + 0);
+ nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
+ }
+
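+ /* hand out sequential ids to every present TP and program each GPC's TP count */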
+ for (tp = 0, id = 0; tp < 4; tp++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tp < priv->tp_nr[gpc]) {
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
+ id++;
+ }
+
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
+ }
+ }
+
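+ /* pack the per-GPC TP counts into one word, four bits per GPC */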
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tp_nr[i] << (i * 4);
+ nv_wr32(dev, 0x406028, tmp);
+ nv_wr32(dev, 0x405870, tmp);
+
+ nv_wr32(dev, 0x40602c, 0x00000000);
+ nv_wr32(dev, 0x405874, 0x00000000);
+ nv_wr32(dev, 0x406030, 0x00000000);
+ nv_wr32(dev, 0x405878, 0x00000000);
+ nv_wr32(dev, 0x406034, 0x00000000);
+ nv_wr32(dev, 0x40587c, 0x00000000);
+
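+ /* distribute TPs to GPCs round-robin; unused slots keep the 0x1f fill value */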
+ if (1) {
+ u8 tpnr[GPC_MAX], data[TP_MAX];
+
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ memset(data, 0x1f, sizeof(data));
+
+ gpc = -1;
+ for (tp = 0; tp < priv->tp_total; tp++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tpnr[gpc]--;
+ data[tp] = gpc;
+ }
+
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
+ }
+
+ if (1) {
+ u32 data[6] = {}, data2[2] = {};
+ u8 tpnr[GPC_MAX];
+ u8 shift, ntpcv;
+
+ /* calculate first set of magics */
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+
+ gpc = -1;
+ for (tp = 0; tp < priv->tp_total; tp++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tpnr[gpc]--;
+
+ data[tp / 6] |= gpc << ((tp % 6) * 5);
+ }
+
+ for (; tp < 32; tp++)
+ data[tp / 6] |= 7 << ((tp % 6) * 5);
+
+ /* and the second... */
+ shift = 0;
+ ntpcv = priv->tp_total;
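+ /* shift tp_total left until bit 4 is set, recording how far it was shifted */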
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = (ntpcv << 16);
+ data2[0] |= (shift << 21);
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
+
+ /* GPC_BROADCAST */
+ nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+ /* GPC_BROADCAST.TP_BROADCAST */
+ nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr |
+ data2[0]);
+ nv_wr32(dev, 0x419be4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
+
+ /* UNK78xx */
+ nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
+ priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+ }
+
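+ /* enable TPs one at a time across 32 entries, writing both the running set
+ * and its complement within tp_mask
+ */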
+ if (1) {
+ u32 tp_mask = 0, tp_set = 0;
+ u8 tpnr[GPC_MAX], a, b;
+
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
+
+ for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+ a = (i * (priv->tp_total - 1)) / 32;
+ if (a != b) {
+ b = a;
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tp = priv->tp_nr[gpc] - tpnr[gpc]--;
+
+ tp_set |= 1 << ((gpc * 8) + tp);
+ }
+
+ nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
+ }
+ }
+
+ nv_wr32(dev, 0x400208, 0x80000000);
+
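+ /* initial state uploaded via the ICMD interface (nv_icmd) */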
+ nv_icmd(dev, 0x00001000, 0x00000004);
+ nv_icmd(dev, 0x000000a9, 0x0000ffff);
+ nv_icmd(dev, 0x00000038, 0x0fac6881);
+ nv_icmd(dev, 0x0000003d, 0x00000001);
+ nv_icmd(dev, 0x000000e8, 0x00000400);
+ nv_icmd(dev, 0x000000e9, 0x00000400);
+ nv_icmd(dev, 0x000000ea, 0x00000400);
+ nv_icmd(dev, 0x000000eb, 0x00000400);
+ nv_icmd(dev, 0x000000ec, 0x00000400);
+ nv_icmd(dev, 0x000000ed, 0x00000400);
+ nv_icmd(dev, 0x000000ee, 0x00000400);
+ nv_icmd(dev, 0x000000ef, 0x00000400);
+ nv_icmd(dev, 0x00000078, 0x00000300);
+ nv_icmd(dev, 0x00000079, 0x00000300);
+ nv_icmd(dev, 0x0000007a, 0x00000300);
+ nv_icmd(dev, 0x0000007b, 0x00000300);
+ nv_icmd(dev, 0x0000007c, 0x00000300);
+ nv_icmd(dev, 0x0000007d, 0x00000300);
+ nv_icmd(dev, 0x0000007e, 0x00000300);
+ nv_icmd(dev, 0x0000007f, 0x00000300);
+ nv_icmd(dev, 0x00000050, 0x00000011);
+ nv_icmd(dev, 0x00000058, 0x00000008);
+ nv_icmd(dev, 0x00000059, 0x00000008);
+ nv_icmd(dev, 0x0000005a, 0x00000008);
+ nv_icmd(dev, 0x0000005b, 0x00000008);
+ nv_icmd(dev, 0x0000005c, 0x00000008);
+ nv_icmd(dev, 0x0000005d, 0x00000008);
+ nv_icmd(dev, 0x0000005e, 0x00000008);
+ nv_icmd(dev, 0x0000005f, 0x00000008);
+ nv_icmd(dev, 0x00000208, 0x00000001);
+ nv_icmd(dev, 0x00000209, 0x00000001);
+ nv_icmd(dev, 0x0000020a, 0x00000001);
+ nv_icmd(dev, 0x0000020b, 0x00000001);
+ nv_icmd(dev, 0x0000020c, 0x00000001);
+ nv_icmd(dev, 0x0000020d, 0x00000001);
+ nv_icmd(dev, 0x0000020e, 0x00000001);
+ nv_icmd(dev, 0x0000020f, 0x00000001);
+ nv_icmd(dev, 0x00000081, 0x00000001);
+ nv_icmd(dev, 0x00000085, 0x00000004);
+ nv_icmd(dev, 0x00000088, 0x00000400);
+ nv_icmd(dev, 0x00000090, 0x00000300);
+ nv_icmd(dev, 0x00000098, 0x00001001);
+ nv_icmd(dev, 0x000000e3, 0x00000001);
+ nv_icmd(dev, 0x000000da, 0x00000001);
+ nv_icmd(dev, 0x000000f8, 0x00000003);
+ nv_icmd(dev, 0x000000fa, 0x00000001);
+ nv_icmd(dev, 0x0000009f, 0x0000ffff);
+ nv_icmd(dev, 0x000000a0, 0x0000ffff);
+ nv_icmd(dev, 0x000000a1, 0x0000ffff);
+ nv_icmd(dev, 0x000000a2, 0x0000ffff);
+ nv_icmd(dev, 0x000000b1, 0x00000001);
+ nv_icmd(dev, 0x000000b2, 0x00000000);
+ nv_icmd(dev, 0x000000b3, 0x00000000);
+ nv_icmd(dev, 0x000000b4, 0x00000000);
+ nv_icmd(dev, 0x000000b5, 0x00000000);
+ nv_icmd(dev, 0x000000b6, 0x00000000);
+ nv_icmd(dev, 0x000000b7, 0x00000000);
+ nv_icmd(dev, 0x000000b8, 0x00000000);
+ nv_icmd(dev, 0x000000b9, 0x00000000);
+ nv_icmd(dev, 0x000000ba, 0x00000000);
+ nv_icmd(dev, 0x000000bb, 0x00000000);
+ nv_icmd(dev, 0x000000bc, 0x00000000);
+ nv_icmd(dev, 0x000000bd, 0x00000000);
+ nv_icmd(dev, 0x000000be, 0x00000000);
+ nv_icmd(dev, 0x000000bf, 0x00000000);
+ nv_icmd(dev, 0x000000c0, 0x00000000);
+ nv_icmd(dev, 0x000000c1, 0x00000000);
+ nv_icmd(dev, 0x000000c2, 0x00000000);
+ nv_icmd(dev, 0x000000c3, 0x00000000);
+ nv_icmd(dev, 0x000000c4, 0x00000000);
+ nv_icmd(dev, 0x000000c5, 0x00000000);
+ nv_icmd(dev, 0x000000c6, 0x00000000);
+ nv_icmd(dev, 0x000000c7, 0x00000000);
+ nv_icmd(dev, 0x000000c8, 0x00000000);
+ nv_icmd(dev, 0x000000c9, 0x00000000);
+ nv_icmd(dev, 0x000000ca, 0x00000000);
+ nv_icmd(dev, 0x000000cb, 0x00000000);
+ nv_icmd(dev, 0x000000cc, 0x00000000);
+ nv_icmd(dev, 0x000000cd, 0x00000000);
+ nv_icmd(dev, 0x000000ce, 0x00000000);
+ nv_icmd(dev, 0x000000cf, 0x00000000);
+ nv_icmd(dev, 0x000000d0, 0x00000000);
+ nv_icmd(dev, 0x000000d1, 0x00000000);
+ nv_icmd(dev, 0x000000d2, 0x00000000);
+ nv_icmd(dev, 0x000000d3, 0x00000000);
+ nv_icmd(dev, 0x000000d4, 0x00000000);
+ nv_icmd(dev, 0x000000d5, 0x00000000);
+ nv_icmd(dev, 0x000000d6, 0x00000000);
+ nv_icmd(dev, 0x000000d7, 0x00000000);
+ nv_icmd(dev, 0x000000d8, 0x00000000);
+ nv_icmd(dev, 0x000000d9, 0x00000000);
+ nv_icmd(dev, 0x00000210, 0x00000040);
+ nv_icmd(dev, 0x00000211, 0x00000040);
+ nv_icmd(dev, 0x00000212, 0x00000040);
+ nv_icmd(dev, 0x00000213, 0x00000040);
+ nv_icmd(dev, 0x00000214, 0x00000040);
+ nv_icmd(dev, 0x00000215, 0x00000040);
+ nv_icmd(dev, 0x00000216, 0x00000040);
+ nv_icmd(dev, 0x00000217, 0x00000040);
+ if (dev_priv->chipset == 0xd9) {
+ for (i = 0x0400; i <= 0x0417; i++)
+ nv_icmd(dev, i, 0x00000040);
+ }
+ nv_icmd(dev, 0x00000218, 0x0000c080);
+ nv_icmd(dev, 0x00000219, 0x0000c080);
+ nv_icmd(dev, 0x0000021a, 0x0000c080);
+ nv_icmd(dev, 0x0000021b, 0x0000c080);
+ nv_icmd(dev, 0x0000021c, 0x0000c080);
+ nv_icmd(dev, 0x0000021d, 0x0000c080);
+ nv_icmd(dev, 0x0000021e, 0x0000c080);
+ nv_icmd(dev, 0x0000021f, 0x0000c080);
+ if (dev_priv->chipset == 0xd9) {
+ for (i = 0x0440; i <= 0x0457; i++)
+ nv_icmd(dev, i, 0x0000c080);
+ }
+ nv_icmd(dev, 0x000000ad, 0x0000013e);
+ nv_icmd(dev, 0x000000e1, 0x00000010);
+ nv_icmd(dev, 0x00000290, 0x00000000);
+ nv_icmd(dev, 0x00000291, 0x00000000);
+ nv_icmd(dev, 0x00000292, 0x00000000);
+ nv_icmd(dev, 0x00000293, 0x00000000);
+ nv_icmd(dev, 0x00000294, 0x00000000);
+ nv_icmd(dev, 0x00000295, 0x00000000);
+ nv_icmd(dev, 0x00000296, 0x00000000);
+ nv_icmd(dev, 0x00000297, 0x00000000);
+ nv_icmd(dev, 0x00000298, 0x00000000);
+ nv_icmd(dev, 0x00000299, 0x00000000);
+ nv_icmd(dev, 0x0000029a, 0x00000000);
+ nv_icmd(dev, 0x0000029b, 0x00000000);
+ nv_icmd(dev, 0x0000029c, 0x00000000);
+ nv_icmd(dev, 0x0000029d, 0x00000000);
+ nv_icmd(dev, 0x0000029e, 0x00000000);
+ nv_icmd(dev, 0x0000029f, 0x00000000);
+ nv_icmd(dev, 0x000003b0, 0x00000000);
+ nv_icmd(dev, 0x000003b1, 0x00000000);
+ nv_icmd(dev, 0x000003b2, 0x00000000);
+ nv_icmd(dev, 0x000003b3, 0x00000000);
+ nv_icmd(dev, 0x000003b4, 0x00000000);
+ nv_icmd(dev, 0x000003b5, 0x00000000);
+ nv_icmd(dev, 0x000003b6, 0x00000000);
+ nv_icmd(dev, 0x000003b7, 0x00000000);
+ nv_icmd(dev, 0x000003b8, 0x00000000);
+ nv_icmd(dev, 0x000003b9, 0x00000000);
+ nv_icmd(dev, 0x000003ba, 0x00000000);
+ nv_icmd(dev, 0x000003bb, 0x00000000);
+ nv_icmd(dev, 0x000003bc, 0x00000000);
+ nv_icmd(dev, 0x000003bd, 0x00000000);
+ nv_icmd(dev, 0x000003be, 0x00000000);
+ nv_icmd(dev, 0x000003bf, 0x00000000);
+ nv_icmd(dev, 0x000002a0, 0x00000000);
+ nv_icmd(dev, 0x000002a1, 0x00000000);
+ nv_icmd(dev, 0x000002a2, 0x00000000);
+ nv_icmd(dev, 0x000002a3, 0x00000000);
+ nv_icmd(dev, 0x000002a4, 0x00000000);
+ nv_icmd(dev, 0x000002a5, 0x00000000);
+ nv_icmd(dev, 0x000002a6, 0x00000000);
+ nv_icmd(dev, 0x000002a7, 0x00000000);
+ nv_icmd(dev, 0x000002a8, 0x00000000);
+ nv_icmd(dev, 0x000002a9, 0x00000000);
+ nv_icmd(dev, 0x000002aa, 0x00000000);
+ nv_icmd(dev, 0x000002ab, 0x00000000);
+ nv_icmd(dev, 0x000002ac, 0x00000000);
+ nv_icmd(dev, 0x000002ad, 0x00000000);
+ nv_icmd(dev, 0x000002ae, 0x00000000);
+ nv_icmd(dev, 0x000002af, 0x00000000);
+ nv_icmd(dev, 0x00000420, 0x00000000);
+ nv_icmd(dev, 0x00000421, 0x00000000);
+ nv_icmd(dev, 0x00000422, 0x00000000);
+ nv_icmd(dev, 0x00000423, 0x00000000);
+ nv_icmd(dev, 0x00000424, 0x00000000);
+ nv_icmd(dev, 0x00000425, 0x00000000);
+ nv_icmd(dev, 0x00000426, 0x00000000);
+ nv_icmd(dev, 0x00000427, 0x00000000);
+ nv_icmd(dev, 0x00000428, 0x00000000);
+ nv_icmd(dev, 0x00000429, 0x00000000);
+ nv_icmd(dev, 0x0000042a, 0x00000000);
+ nv_icmd(dev, 0x0000042b, 0x00000000);
+ nv_icmd(dev, 0x0000042c, 0x00000000);
+ nv_icmd(dev, 0x0000042d, 0x00000000);
+ nv_icmd(dev, 0x0000042e, 0x00000000);
+ nv_icmd(dev, 0x0000042f, 0x00000000);
+ nv_icmd(dev, 0x000002b0, 0x00000000);
+ nv_icmd(dev, 0x000002b1, 0x00000000);
+ nv_icmd(dev, 0x000002b2, 0x00000000);
+ nv_icmd(dev, 0x000002b3, 0x00000000);
+ nv_icmd(dev, 0x000002b4, 0x00000000);
+ nv_icmd(dev, 0x000002b5, 0x00000000);
+ nv_icmd(dev, 0x000002b6, 0x00000000);
+ nv_icmd(dev, 0x000002b7, 0x00000000);
+ nv_icmd(dev, 0x000002b8, 0x00000000);
+ nv_icmd(dev, 0x000002b9, 0x00000000);
+ nv_icmd(dev, 0x000002ba, 0x00000000);
+ nv_icmd(dev, 0x000002bb, 0x00000000);
+ nv_icmd(dev, 0x000002bc, 0x00000000);
+ nv_icmd(dev, 0x000002bd, 0x00000000);
+ nv_icmd(dev, 0x000002be, 0x00000000);
+ nv_icmd(dev, 0x000002bf, 0x00000000);
+ nv_icmd(dev, 0x00000430, 0x00000000);
+ nv_icmd(dev, 0x00000431, 0x00000000);
+ nv_icmd(dev, 0x00000432, 0x00000000);
+ nv_icmd(dev, 0x00000433, 0x00000000);
+ nv_icmd(dev, 0x00000434, 0x00000000);
+ nv_icmd(dev, 0x00000435, 0x00000000);
+ nv_icmd(dev, 0x00000436, 0x00000000);
+ nv_icmd(dev, 0x00000437, 0x00000000);
+ nv_icmd(dev, 0x00000438, 0x00000000);
+ nv_icmd(dev, 0x00000439, 0x00000000);
+ nv_icmd(dev, 0x0000043a, 0x00000000);
+ nv_icmd(dev, 0x0000043b, 0x00000000);
+ nv_icmd(dev, 0x0000043c, 0x00000000);
+ nv_icmd(dev, 0x0000043d, 0x00000000);
+ nv_icmd(dev, 0x0000043e, 0x00000000);
+ nv_icmd(dev, 0x0000043f, 0x00000000);
+ nv_icmd(dev, 0x000002c0, 0x00000000);
+ nv_icmd(dev, 0x000002c1, 0x00000000);
+ nv_icmd(dev, 0x000002c2, 0x00000000);
+ nv_icmd(dev, 0x000002c3, 0x00000000);
+ nv_icmd(dev, 0x000002c4, 0x00000000);
+ nv_icmd(dev, 0x000002c5, 0x00000000);
+ nv_icmd(dev, 0x000002c6, 0x00000000);
+ nv_icmd(dev, 0x000002c7, 0x00000000);
+ nv_icmd(dev, 0x000002c8, 0x00000000);
+ nv_icmd(dev, 0x000002c9, 0x00000000);
+ nv_icmd(dev, 0x000002ca, 0x00000000);
+ nv_icmd(dev, 0x000002cb, 0x00000000);
+ nv_icmd(dev, 0x000002cc, 0x00000000);
+ nv_icmd(dev, 0x000002cd, 0x00000000);
+ nv_icmd(dev, 0x000002ce, 0x00000000);
+ nv_icmd(dev, 0x000002cf, 0x00000000);
+ nv_icmd(dev, 0x000004d0, 0x00000000);
+ nv_icmd(dev, 0x000004d1, 0x00000000);
+ nv_icmd(dev, 0x000004d2, 0x00000000);
+ nv_icmd(dev, 0x000004d3, 0x00000000);
+ nv_icmd(dev, 0x000004d4, 0x00000000);
+ nv_icmd(dev, 0x000004d5, 0x00000000);
+ nv_icmd(dev, 0x000004d6, 0x00000000);
+ nv_icmd(dev, 0x000004d7, 0x00000000);
+ nv_icmd(dev, 0x000004d8, 0x00000000);
+ nv_icmd(dev, 0x000004d9, 0x00000000);
+ nv_icmd(dev, 0x000004da, 0x00000000);
+ nv_icmd(dev, 0x000004db, 0x00000000);
+ nv_icmd(dev, 0x000004dc, 0x00000000);
+ nv_icmd(dev, 0x000004dd, 0x00000000);
+ nv_icmd(dev, 0x000004de, 0x00000000);
+ nv_icmd(dev, 0x000004df, 0x00000000);
+ nv_icmd(dev, 0x00000720, 0x00000000);
+ nv_icmd(dev, 0x00000721, 0x00000000);
+ nv_icmd(dev, 0x00000722, 0x00000000);
+ nv_icmd(dev, 0x00000723, 0x00000000);
+ nv_icmd(dev, 0x00000724, 0x00000000);
+ nv_icmd(dev, 0x00000725, 0x00000000);
+ nv_icmd(dev, 0x00000726, 0x00000000);
+ nv_icmd(dev, 0x00000727, 0x00000000);
+ nv_icmd(dev, 0x00000728, 0x00000000);
+ nv_icmd(dev, 0x00000729, 0x00000000);
+ nv_icmd(dev, 0x0000072a, 0x00000000);
+ nv_icmd(dev, 0x0000072b, 0x00000000);
+ nv_icmd(dev, 0x0000072c, 0x00000000);
+ nv_icmd(dev, 0x0000072d, 0x00000000);
+ nv_icmd(dev, 0x0000072e, 0x00000000);
+ nv_icmd(dev, 0x0000072f, 0x00000000);
+ nv_icmd(dev, 0x000008c0, 0x00000000);
+ nv_icmd(dev, 0x000008c1, 0x00000000);
+ nv_icmd(dev, 0x000008c2, 0x00000000);
+ nv_icmd(dev, 0x000008c3, 0x00000000);
+ nv_icmd(dev, 0x000008c4, 0x00000000);
+ nv_icmd(dev, 0x000008c5, 0x00000000);
+ nv_icmd(dev, 0x000008c6, 0x00000000);
+ nv_icmd(dev, 0x000008c7, 0x00000000);
+ nv_icmd(dev, 0x000008c8, 0x00000000);
+ nv_icmd(dev, 0x000008c9, 0x00000000);
+ nv_icmd(dev, 0x000008ca, 0x00000000);
+ nv_icmd(dev, 0x000008cb, 0x00000000);
+ nv_icmd(dev, 0x000008cc, 0x00000000);
+ nv_icmd(dev, 0x000008cd, 0x00000000);
+ nv_icmd(dev, 0x000008ce, 0x00000000);
+ nv_icmd(dev, 0x000008cf, 0x00000000);
+ nv_icmd(dev, 0x00000890, 0x00000000);
+ nv_icmd(dev, 0x00000891, 0x00000000);
+ nv_icmd(dev, 0x00000892, 0x00000000);
+ nv_icmd(dev, 0x00000893, 0x00000000);
+ nv_icmd(dev, 0x00000894, 0x00000000);
+ nv_icmd(dev, 0x00000895, 0x00000000);
+ nv_icmd(dev, 0x00000896, 0x00000000);
+ nv_icmd(dev, 0x00000897, 0x00000000);
+ nv_icmd(dev, 0x00000898, 0x00000000);
+ nv_icmd(dev, 0x00000899, 0x00000000);
+ nv_icmd(dev, 0x0000089a, 0x00000000);
+ nv_icmd(dev, 0x0000089b, 0x00000000);
+ nv_icmd(dev, 0x0000089c, 0x00000000);
+ nv_icmd(dev, 0x0000089d, 0x00000000);
+ nv_icmd(dev, 0x0000089e, 0x00000000);
+ nv_icmd(dev, 0x0000089f, 0x00000000);
+ nv_icmd(dev, 0x000008e0, 0x00000000);
+ nv_icmd(dev, 0x000008e1, 0x00000000);
+ nv_icmd(dev, 0x000008e2, 0x00000000);
+ nv_icmd(dev, 0x000008e3, 0x00000000);
+ nv_icmd(dev, 0x000008e4, 0x00000000);
+ nv_icmd(dev, 0x000008e5, 0x00000000);
+ nv_icmd(dev, 0x000008e6, 0x00000000);
+ nv_icmd(dev, 0x000008e7, 0x00000000);
+ nv_icmd(dev, 0x000008e8, 0x00000000);
+ nv_icmd(dev, 0x000008e9, 0x00000000);
+ nv_icmd(dev, 0x000008ea, 0x00000000);
+ nv_icmd(dev, 0x000008eb, 0x00000000);
+ nv_icmd(dev, 0x000008ec, 0x00000000);
+ nv_icmd(dev, 0x000008ed, 0x00000000);
+ nv_icmd(dev, 0x000008ee, 0x00000000);
+ nv_icmd(dev, 0x000008ef, 0x00000000);
+ nv_icmd(dev, 0x000008a0, 0x00000000);
+ nv_icmd(dev, 0x000008a1, 0x00000000);
+ nv_icmd(dev, 0x000008a2, 0x00000000);
+ nv_icmd(dev, 0x000008a3, 0x00000000);
+ nv_icmd(dev, 0x000008a4, 0x00000000);
+ nv_icmd(dev, 0x000008a5, 0x00000000);
+ nv_icmd(dev, 0x000008a6, 0x00000000);
+ nv_icmd(dev, 0x000008a7, 0x00000000);
+ nv_icmd(dev, 0x000008a8, 0x00000000);
+ nv_icmd(dev, 0x000008a9, 0x00000000);
+ nv_icmd(dev, 0x000008aa, 0x00000000);
+ nv_icmd(dev, 0x000008ab, 0x00000000);
+ nv_icmd(dev, 0x000008ac, 0x00000000);
+ nv_icmd(dev, 0x000008ad, 0x00000000);
+ nv_icmd(dev, 0x000008ae, 0x00000000);
+ nv_icmd(dev, 0x000008af, 0x00000000);
+ nv_icmd(dev, 0x000008f0, 0x00000000);
+ nv_icmd(dev, 0x000008f1, 0x00000000);
+ nv_icmd(dev, 0x000008f2, 0x00000000);
+ nv_icmd(dev, 0x000008f3, 0x00000000);
+ nv_icmd(dev, 0x000008f4, 0x00000000);
+ nv_icmd(dev, 0x000008f5, 0x00000000);
+ nv_icmd(dev, 0x000008f6, 0x00000000);
+ nv_icmd(dev, 0x000008f7, 0x00000000);
+ nv_icmd(dev, 0x000008f8, 0x00000000);
+ nv_icmd(dev, 0x000008f9, 0x00000000);
+ nv_icmd(dev, 0x000008fa, 0x00000000);
+ nv_icmd(dev, 0x000008fb, 0x00000000);
+ nv_icmd(dev, 0x000008fc, 0x00000000);
+ nv_icmd(dev, 0x000008fd, 0x00000000);
+ nv_icmd(dev, 0x000008fe, 0x00000000);
+ nv_icmd(dev, 0x000008ff, 0x00000000);
+ nv_icmd(dev, 0x0000094c, 0x000000ff);
+ nv_icmd(dev, 0x0000094d, 0xffffffff);
+ nv_icmd(dev, 0x0000094e, 0x00000002);
+ nv_icmd(dev, 0x000002ec, 0x00000001);
+ nv_icmd(dev, 0x00000303, 0x00000001);
+ nv_icmd(dev, 0x000002e6, 0x00000001);
+ nv_icmd(dev, 0x00000466, 0x00000052);
+ nv_icmd(dev, 0x00000301, 0x3f800000);
+ nv_icmd(dev, 0x00000304, 0x30201000);
+ nv_icmd(dev, 0x00000305, 0x70605040);
+ nv_icmd(dev, 0x00000306, 0xb8a89888);
+ nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
+ nv_icmd(dev, 0x0000030a, 0x00ffff00);
+ nv_icmd(dev, 0x0000030b, 0x0000001a);
+ nv_icmd(dev, 0x0000030c, 0x00000001);
+ nv_icmd(dev, 0x00000318, 0x00000001);
+ nv_icmd(dev, 0x00000340, 0x00000000);
+ nv_icmd(dev, 0x00000375, 0x00000001);
+ nv_icmd(dev, 0x00000351, 0x00000100);
+ nv_icmd(dev, 0x0000037d, 0x00000006);
+ nv_icmd(dev, 0x000003a0, 0x00000002);
+ nv_icmd(dev, 0x000003aa, 0x00000001);
+ nv_icmd(dev, 0x000003a9, 0x00000001);
+ nv_icmd(dev, 0x00000380, 0x00000001);
+ nv_icmd(dev, 0x00000360, 0x00000040);
+ nv_icmd(dev, 0x00000366, 0x00000000);
+ nv_icmd(dev, 0x00000367, 0x00000000);
+ nv_icmd(dev, 0x00000368, 0x00001fff);
+ nv_icmd(dev, 0x00000370, 0x00000000);
+ nv_icmd(dev, 0x00000371, 0x00000000);
+ nv_icmd(dev, 0x00000372, 0x003fffff);
+ nv_icmd(dev, 0x0000037a, 0x00000012);
+ nv_icmd(dev, 0x000005e0, 0x00000022);
+ nv_icmd(dev, 0x000005e1, 0x00000022);
+ nv_icmd(dev, 0x000005e2, 0x00000022);
+ nv_icmd(dev, 0x000005e3, 0x00000022);
+ nv_icmd(dev, 0x000005e4, 0x00000022);
+ nv_icmd(dev, 0x00000619, 0x00000003);
+ nv_icmd(dev, 0x00000811, 0x00000003);
+ nv_icmd(dev, 0x00000812, 0x00000004);
+ nv_icmd(dev, 0x00000813, 0x00000006);
+ nv_icmd(dev, 0x00000814, 0x00000008);
+ nv_icmd(dev, 0x00000815, 0x0000000b);
+ nv_icmd(dev, 0x00000800, 0x00000001);
+ nv_icmd(dev, 0x00000801, 0x00000001);
+ nv_icmd(dev, 0x00000802, 0x00000001);
+ nv_icmd(dev, 0x00000803, 0x00000001);
+ nv_icmd(dev, 0x00000804, 0x00000001);
+ nv_icmd(dev, 0x00000805, 0x00000001);
+ nv_icmd(dev, 0x00000632, 0x00000001);
+ nv_icmd(dev, 0x00000633, 0x00000002);
+ nv_icmd(dev, 0x00000634, 0x00000003);
+ nv_icmd(dev, 0x00000635, 0x00000004);
+ nv_icmd(dev, 0x00000654, 0x3f800000);
+ nv_icmd(dev, 0x00000657, 0x3f800000);
+ nv_icmd(dev, 0x00000655, 0x3f800000);
+ nv_icmd(dev, 0x00000656, 0x3f800000);
+ nv_icmd(dev, 0x000006cd, 0x3f800000);
+ nv_icmd(dev, 0x000007f5, 0x3f800000);
+ nv_icmd(dev, 0x000007dc, 0x39291909);
+ nv_icmd(dev, 0x000007dd, 0x79695949);
+ nv_icmd(dev, 0x000007de, 0xb9a99989);
+ nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
+ nv_icmd(dev, 0x000007e8, 0x00003210);
+ nv_icmd(dev, 0x000007e9, 0x00007654);
+ nv_icmd(dev, 0x000007ea, 0x00000098);
+ nv_icmd(dev, 0x000007ec, 0x39291909);
+ nv_icmd(dev, 0x000007ed, 0x79695949);
+ nv_icmd(dev, 0x000007ee, 0xb9a99989);
+ nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
+ nv_icmd(dev, 0x000007f0, 0x00003210);
+ nv_icmd(dev, 0x000007f1, 0x00007654);
+ nv_icmd(dev, 0x000007f2, 0x00000098);
+ nv_icmd(dev, 0x000005a5, 0x00000001);
+ nv_icmd(dev, 0x00000980, 0x00000000);
+ nv_icmd(dev, 0x00000981, 0x00000000);
+ nv_icmd(dev, 0x00000982, 0x00000000);
+ nv_icmd(dev, 0x00000983, 0x00000000);
+ nv_icmd(dev, 0x00000984, 0x00000000);
+ nv_icmd(dev, 0x00000985, 0x00000000);
+ nv_icmd(dev, 0x00000986, 0x00000000);
+ nv_icmd(dev, 0x00000987, 0x00000000);
+ nv_icmd(dev, 0x00000988, 0x00000000);
+ nv_icmd(dev, 0x00000989, 0x00000000);
+ nv_icmd(dev, 0x0000098a, 0x00000000);
+ nv_icmd(dev, 0x0000098b, 0x00000000);
+ nv_icmd(dev, 0x0000098c, 0x00000000);
+ nv_icmd(dev, 0x0000098d, 0x00000000);
+ nv_icmd(dev, 0x0000098e, 0x00000000);
+ nv_icmd(dev, 0x0000098f, 0x00000000);
+ nv_icmd(dev, 0x00000990, 0x00000000);
+ nv_icmd(dev, 0x00000991, 0x00000000);
+ nv_icmd(dev, 0x00000992, 0x00000000);
+ nv_icmd(dev, 0x00000993, 0x00000000);
+ nv_icmd(dev, 0x00000994, 0x00000000);
+ nv_icmd(dev, 0x00000995, 0x00000000);
+ nv_icmd(dev, 0x00000996, 0x00000000);
+ nv_icmd(dev, 0x00000997, 0x00000000);
+ nv_icmd(dev, 0x00000998, 0x00000000);
+ nv_icmd(dev, 0x00000999, 0x00000000);
+ nv_icmd(dev, 0x0000099a, 0x00000000);
+ nv_icmd(dev, 0x0000099b, 0x00000000);
+ nv_icmd(dev, 0x0000099c, 0x00000000);
+ nv_icmd(dev, 0x0000099d, 0x00000000);
+ nv_icmd(dev, 0x0000099e, 0x00000000);
+ nv_icmd(dev, 0x0000099f, 0x00000000);
+ nv_icmd(dev, 0x000009a0, 0x00000000);
+ nv_icmd(dev, 0x000009a1, 0x00000000);
+ nv_icmd(dev, 0x000009a2, 0x00000000);
+ nv_icmd(dev, 0x000009a3, 0x00000000);
+ nv_icmd(dev, 0x000009a4, 0x00000000);
+ nv_icmd(dev, 0x000009a5, 0x00000000);
+ nv_icmd(dev, 0x000009a6, 0x00000000);
+ nv_icmd(dev, 0x000009a7, 0x00000000);
+ nv_icmd(dev, 0x000009a8, 0x00000000);
+ nv_icmd(dev, 0x000009a9, 0x00000000);
+ nv_icmd(dev, 0x000009aa, 0x00000000);
+ nv_icmd(dev, 0x000009ab, 0x00000000);
+ nv_icmd(dev, 0x000009ac, 0x00000000);
+ nv_icmd(dev, 0x000009ad, 0x00000000);
+ nv_icmd(dev, 0x000009ae, 0x00000000);
+ nv_icmd(dev, 0x000009af, 0x00000000);
+ nv_icmd(dev, 0x000009b0, 0x00000000);
+ nv_icmd(dev, 0x000009b1, 0x00000000);
+ nv_icmd(dev, 0x000009b2, 0x00000000);
+ nv_icmd(dev, 0x000009b3, 0x00000000);
+ nv_icmd(dev, 0x000009b4, 0x00000000);
+ nv_icmd(dev, 0x000009b5, 0x00000000);
+ nv_icmd(dev, 0x000009b6, 0x00000000);
+ nv_icmd(dev, 0x000009b7, 0x00000000);
+ nv_icmd(dev, 0x000009b8, 0x00000000);
+ nv_icmd(dev, 0x000009b9, 0x00000000);
+ nv_icmd(dev, 0x000009ba, 0x00000000);
+ nv_icmd(dev, 0x000009bb, 0x00000000);
+ nv_icmd(dev, 0x000009bc, 0x00000000);
+ nv_icmd(dev, 0x000009bd, 0x00000000);
+ nv_icmd(dev, 0x000009be, 0x00000000);
+ nv_icmd(dev, 0x000009bf, 0x00000000);
+ nv_icmd(dev, 0x000009c0, 0x00000000);
+ nv_icmd(dev, 0x000009c1, 0x00000000);
+ nv_icmd(dev, 0x000009c2, 0x00000000);
+ nv_icmd(dev, 0x000009c3, 0x00000000);
+ nv_icmd(dev, 0x000009c4, 0x00000000);
+ nv_icmd(dev, 0x000009c5, 0x00000000);
+ nv_icmd(dev, 0x000009c6, 0x00000000);
+ nv_icmd(dev, 0x000009c7, 0x00000000);
+ nv_icmd(dev, 0x000009c8, 0x00000000);
+ nv_icmd(dev, 0x000009c9, 0x00000000);
+ nv_icmd(dev, 0x000009ca, 0x00000000);
+ nv_icmd(dev, 0x000009cb, 0x00000000);
+ nv_icmd(dev, 0x000009cc, 0x00000000);
+ nv_icmd(dev, 0x000009cd, 0x00000000);
+ nv_icmd(dev, 0x000009ce, 0x00000000);
+ nv_icmd(dev, 0x000009cf, 0x00000000);
+ nv_icmd(dev, 0x000009d0, 0x00000000);
+ nv_icmd(dev, 0x000009d1, 0x00000000);
+ nv_icmd(dev, 0x000009d2, 0x00000000);
+ nv_icmd(dev, 0x000009d3, 0x00000000);
+ nv_icmd(dev, 0x000009d4, 0x00000000);
+ nv_icmd(dev, 0x000009d5, 0x00000000);
+ nv_icmd(dev, 0x000009d6, 0x00000000);
+ nv_icmd(dev, 0x000009d7, 0x00000000);
+ nv_icmd(dev, 0x000009d8, 0x00000000);
+ nv_icmd(dev, 0x000009d9, 0x00000000);
+ nv_icmd(dev, 0x000009da, 0x00000000);
+ nv_icmd(dev, 0x000009db, 0x00000000);
+ nv_icmd(dev, 0x000009dc, 0x00000000);
+ nv_icmd(dev, 0x000009dd, 0x00000000);
+ nv_icmd(dev, 0x000009de, 0x00000000);
+ nv_icmd(dev, 0x000009df, 0x00000000);
+ nv_icmd(dev, 0x000009e0, 0x00000000);
+ nv_icmd(dev, 0x000009e1, 0x00000000);
+ nv_icmd(dev, 0x000009e2, 0x00000000);
+ nv_icmd(dev, 0x000009e3, 0x00000000);
+ nv_icmd(dev, 0x000009e4, 0x00000000);
+ nv_icmd(dev, 0x000009e5, 0x00000000);
+ nv_icmd(dev, 0x000009e6, 0x00000000);
+ nv_icmd(dev, 0x000009e7, 0x00000000);
+ nv_icmd(dev, 0x000009e8, 0x00000000);
+ nv_icmd(dev, 0x000009e9, 0x00000000);
+ nv_icmd(dev, 0x000009ea, 0x00000000);
+ nv_icmd(dev, 0x000009eb, 0x00000000);
+ nv_icmd(dev, 0x000009ec, 0x00000000);
+ nv_icmd(dev, 0x000009ed, 0x00000000);
+ nv_icmd(dev, 0x000009ee, 0x00000000);
+ nv_icmd(dev, 0x000009ef, 0x00000000);
+ nv_icmd(dev, 0x000009f0, 0x00000000);
+ nv_icmd(dev, 0x000009f1, 0x00000000);
+ nv_icmd(dev, 0x000009f2, 0x00000000);
+ nv_icmd(dev, 0x000009f3, 0x00000000);
+ nv_icmd(dev, 0x000009f4, 0x00000000);
+ nv_icmd(dev, 0x000009f5, 0x00000000);
+ nv_icmd(dev, 0x000009f6, 0x00000000);
+ nv_icmd(dev, 0x000009f7, 0x00000000);
+ nv_icmd(dev, 0x000009f8, 0x00000000);
+ nv_icmd(dev, 0x000009f9, 0x00000000);
+ nv_icmd(dev, 0x000009fa, 0x00000000);
+ nv_icmd(dev, 0x000009fb, 0x00000000);
+ nv_icmd(dev, 0x000009fc, 0x00000000);
+ nv_icmd(dev, 0x000009fd, 0x00000000);
+ nv_icmd(dev, 0x000009fe, 0x00000000);
+ nv_icmd(dev, 0x000009ff, 0x00000000);
+ nv_icmd(dev, 0x00000468, 0x00000004);
+ nv_icmd(dev, 0x0000046c, 0x00000001);
+ nv_icmd(dev, 0x00000470, 0x00000000);
+ nv_icmd(dev, 0x00000471, 0x00000000);
+ nv_icmd(dev, 0x00000472, 0x00000000);
+ nv_icmd(dev, 0x00000473, 0x00000000);
+ nv_icmd(dev, 0x00000474, 0x00000000);
+ nv_icmd(dev, 0x00000475, 0x00000000);
+ nv_icmd(dev, 0x00000476, 0x00000000);
+ nv_icmd(dev, 0x00000477, 0x00000000);
+ nv_icmd(dev, 0x00000478, 0x00000000);
+ nv_icmd(dev, 0x00000479, 0x00000000);
+ nv_icmd(dev, 0x0000047a, 0x00000000);
+ nv_icmd(dev, 0x0000047b, 0x00000000);
+ nv_icmd(dev, 0x0000047c, 0x00000000);
+ nv_icmd(dev, 0x0000047d, 0x00000000);
+ nv_icmd(dev, 0x0000047e, 0x00000000);
+ nv_icmd(dev, 0x0000047f, 0x00000000);
+ nv_icmd(dev, 0x00000480, 0x00000000);
+ nv_icmd(dev, 0x00000481, 0x00000000);
+ nv_icmd(dev, 0x00000482, 0x00000000);
+ nv_icmd(dev, 0x00000483, 0x00000000);
+ nv_icmd(dev, 0x00000484, 0x00000000);
+ nv_icmd(dev, 0x00000485, 0x00000000);
+ nv_icmd(dev, 0x00000486, 0x00000000);
+ nv_icmd(dev, 0x00000487, 0x00000000);
+ nv_icmd(dev, 0x00000488, 0x00000000);
+ nv_icmd(dev, 0x00000489, 0x00000000);
+ nv_icmd(dev, 0x0000048a, 0x00000000);
+ nv_icmd(dev, 0x0000048b, 0x00000000);
+ nv_icmd(dev, 0x0000048c, 0x00000000);
+ nv_icmd(dev, 0x0000048d, 0x00000000);
+ nv_icmd(dev, 0x0000048e, 0x00000000);
+ nv_icmd(dev, 0x0000048f, 0x00000000);
+ nv_icmd(dev, 0x00000490, 0x00000000);
+ nv_icmd(dev, 0x00000491, 0x00000000);
+ nv_icmd(dev, 0x00000492, 0x00000000);
+ nv_icmd(dev, 0x00000493, 0x00000000);
+ nv_icmd(dev, 0x00000494, 0x00000000);
+ nv_icmd(dev, 0x00000495, 0x00000000);
+ nv_icmd(dev, 0x00000496, 0x00000000);
+ nv_icmd(dev, 0x00000497, 0x00000000);
+ nv_icmd(dev, 0x00000498, 0x00000000);
+ nv_icmd(dev, 0x00000499, 0x00000000);
+ nv_icmd(dev, 0x0000049a, 0x00000000);
+ nv_icmd(dev, 0x0000049b, 0x00000000);
+ nv_icmd(dev, 0x0000049c, 0x00000000);
+ nv_icmd(dev, 0x0000049d, 0x00000000);
+ nv_icmd(dev, 0x0000049e, 0x00000000);
+ nv_icmd(dev, 0x0000049f, 0x00000000);
+ nv_icmd(dev, 0x000004a0, 0x00000000);
+ nv_icmd(dev, 0x000004a1, 0x00000000);
+ nv_icmd(dev, 0x000004a2, 0x00000000);
+ nv_icmd(dev, 0x000004a3, 0x00000000);
+ nv_icmd(dev, 0x000004a4, 0x00000000);
+ nv_icmd(dev, 0x000004a5, 0x00000000);
+ nv_icmd(dev, 0x000004a6, 0x00000000);
+ nv_icmd(dev, 0x000004a7, 0x00000000);
+ nv_icmd(dev, 0x000004a8, 0x00000000);
+ nv_icmd(dev, 0x000004a9, 0x00000000);
+ nv_icmd(dev, 0x000004aa, 0x00000000);
+ nv_icmd(dev, 0x000004ab, 0x00000000);
+ nv_icmd(dev, 0x000004ac, 0x00000000);
+ nv_icmd(dev, 0x000004ad, 0x00000000);
+ nv_icmd(dev, 0x000004ae, 0x00000000);
+ nv_icmd(dev, 0x000004af, 0x00000000);
+ nv_icmd(dev, 0x000004b0, 0x00000000);
+ nv_icmd(dev, 0x000004b1, 0x00000000);
+ nv_icmd(dev, 0x000004b2, 0x00000000);
+ nv_icmd(dev, 0x000004b3, 0x00000000);
+ nv_icmd(dev, 0x000004b4, 0x00000000);
+ nv_icmd(dev, 0x000004b5, 0x00000000);
+ nv_icmd(dev, 0x000004b6, 0x00000000);
+ nv_icmd(dev, 0x000004b7, 0x00000000);
+ nv_icmd(dev, 0x000004b8, 0x00000000);
+ nv_icmd(dev, 0x000004b9, 0x00000000);
+ nv_icmd(dev, 0x000004ba, 0x00000000);
+ nv_icmd(dev, 0x000004bb, 0x00000000);
+ nv_icmd(dev, 0x000004bc, 0x00000000);
+ nv_icmd(dev, 0x000004bd, 0x00000000);
+ nv_icmd(dev, 0x000004be, 0x00000000);
+ nv_icmd(dev, 0x000004bf, 0x00000000);
+ nv_icmd(dev, 0x000004c0, 0x00000000);
+ nv_icmd(dev, 0x000004c1, 0x00000000);
+ nv_icmd(dev, 0x000004c2, 0x00000000);
+ nv_icmd(dev, 0x000004c3, 0x00000000);
+ nv_icmd(dev, 0x000004c4, 0x00000000);
+ nv_icmd(dev, 0x000004c5, 0x00000000);
+ nv_icmd(dev, 0x000004c6, 0x00000000);
+ nv_icmd(dev, 0x000004c7, 0x00000000);
+ nv_icmd(dev, 0x000004c8, 0x00000000);
+ nv_icmd(dev, 0x000004c9, 0x00000000);
+ nv_icmd(dev, 0x000004ca, 0x00000000);
+ nv_icmd(dev, 0x000004cb, 0x00000000);
+ nv_icmd(dev, 0x000004cc, 0x00000000);
+ nv_icmd(dev, 0x000004cd, 0x00000000);
+ nv_icmd(dev, 0x000004ce, 0x00000000);
+ nv_icmd(dev, 0x000004cf, 0x00000000);
+ nv_icmd(dev, 0x00000510, 0x3f800000);
+ nv_icmd(dev, 0x00000511, 0x3f800000);
+ nv_icmd(dev, 0x00000512, 0x3f800000);
+ nv_icmd(dev, 0x00000513, 0x3f800000);
+ nv_icmd(dev, 0x00000514, 0x3f800000);
+ nv_icmd(dev, 0x00000515, 0x3f800000);
+ nv_icmd(dev, 0x00000516, 0x3f800000);
+ nv_icmd(dev, 0x00000517, 0x3f800000);
+ nv_icmd(dev, 0x00000518, 0x3f800000);
+ nv_icmd(dev, 0x00000519, 0x3f800000);
+ nv_icmd(dev, 0x0000051a, 0x3f800000);
+ nv_icmd(dev, 0x0000051b, 0x3f800000);
+ nv_icmd(dev, 0x0000051c, 0x3f800000);
+ nv_icmd(dev, 0x0000051d, 0x3f800000);
+ nv_icmd(dev, 0x0000051e, 0x3f800000);
+ nv_icmd(dev, 0x0000051f, 0x3f800000);
+ nv_icmd(dev, 0x00000520, 0x000002b6);
+ nv_icmd(dev, 0x00000529, 0x00000001);
+ nv_icmd(dev, 0x00000530, 0xffff0000);
+ nv_icmd(dev, 0x00000531, 0xffff0000);
+ nv_icmd(dev, 0x00000532, 0xffff0000);
+ nv_icmd(dev, 0x00000533, 0xffff0000);
+ nv_icmd(dev, 0x00000534, 0xffff0000);
+ nv_icmd(dev, 0x00000535, 0xffff0000);
+ nv_icmd(dev, 0x00000536, 0xffff0000);
+ nv_icmd(dev, 0x00000537, 0xffff0000);
+ nv_icmd(dev, 0x00000538, 0xffff0000);
+ nv_icmd(dev, 0x00000539, 0xffff0000);
+ nv_icmd(dev, 0x0000053a, 0xffff0000);
+ nv_icmd(dev, 0x0000053b, 0xffff0000);
+ nv_icmd(dev, 0x0000053c, 0xffff0000);
+ nv_icmd(dev, 0x0000053d, 0xffff0000);
+ nv_icmd(dev, 0x0000053e, 0xffff0000);
+ nv_icmd(dev, 0x0000053f, 0xffff0000);
+ nv_icmd(dev, 0x00000585, 0x0000003f);
+ nv_icmd(dev, 0x00000576, 0x00000003);
+ if (dev_priv->chipset == 0xc1 ||
+ dev_priv->chipset == 0xd9)
+ nv_icmd(dev, 0x0000057b, 0x00000059);
+ nv_icmd(dev, 0x00000586, 0x00000040);
+ nv_icmd(dev, 0x00000582, 0x00000080);
+ nv_icmd(dev, 0x00000583, 0x00000080);
+ nv_icmd(dev, 0x000005c2, 0x00000001);
+ nv_icmd(dev, 0x00000638, 0x00000001);
+ nv_icmd(dev, 0x00000639, 0x00000001);
+ nv_icmd(dev, 0x0000063a, 0x00000002);
+ nv_icmd(dev, 0x0000063b, 0x00000001);
+ nv_icmd(dev, 0x0000063c, 0x00000001);
+ nv_icmd(dev, 0x0000063d, 0x00000002);
+ nv_icmd(dev, 0x0000063e, 0x00000001);
+ nv_icmd(dev, 0x000008b8, 0x00000001);
+ nv_icmd(dev, 0x000008b9, 0x00000001);
+ nv_icmd(dev, 0x000008ba, 0x00000001);
+ nv_icmd(dev, 0x000008bb, 0x00000001);
+ nv_icmd(dev, 0x000008bc, 0x00000001);
+ nv_icmd(dev, 0x000008bd, 0x00000001);
+ nv_icmd(dev, 0x000008be, 0x00000001);
+ nv_icmd(dev, 0x000008bf, 0x00000001);
+ nv_icmd(dev, 0x00000900, 0x00000001);
+ nv_icmd(dev, 0x00000901, 0x00000001);
+ nv_icmd(dev, 0x00000902, 0x00000001);
+ nv_icmd(dev, 0x00000903, 0x00000001);
+ nv_icmd(dev, 0x00000904, 0x00000001);
+ nv_icmd(dev, 0x00000905, 0x00000001);
+ nv_icmd(dev, 0x00000906, 0x00000001);
+ nv_icmd(dev, 0x00000907, 0x00000001);
+ nv_icmd(dev, 0x00000908, 0x00000002);
+ nv_icmd(dev, 0x00000909, 0x00000002);
+ nv_icmd(dev, 0x0000090a, 0x00000002);
+ nv_icmd(dev, 0x0000090b, 0x00000002);
+ nv_icmd(dev, 0x0000090c, 0x00000002);
+ nv_icmd(dev, 0x0000090d, 0x00000002);
+ nv_icmd(dev, 0x0000090e, 0x00000002);
+ nv_icmd(dev, 0x0000090f, 0x00000002);
+ nv_icmd(dev, 0x00000910, 0x00000001);
+ nv_icmd(dev, 0x00000911, 0x00000001);
+ nv_icmd(dev, 0x00000912, 0x00000001);
+ nv_icmd(dev, 0x00000913, 0x00000001);
+ nv_icmd(dev, 0x00000914, 0x00000001);
+ nv_icmd(dev, 0x00000915, 0x00000001);
+ nv_icmd(dev, 0x00000916, 0x00000001);
+ nv_icmd(dev, 0x00000917, 0x00000001);
+ nv_icmd(dev, 0x00000918, 0x00000001);
+ nv_icmd(dev, 0x00000919, 0x00000001);
+ nv_icmd(dev, 0x0000091a, 0x00000001);
+ nv_icmd(dev, 0x0000091b, 0x00000001);
+ nv_icmd(dev, 0x0000091c, 0x00000001);
+ nv_icmd(dev, 0x0000091d, 0x00000001);
+ nv_icmd(dev, 0x0000091e, 0x00000001);
+ nv_icmd(dev, 0x0000091f, 0x00000001);
+ nv_icmd(dev, 0x00000920, 0x00000002);
+ nv_icmd(dev, 0x00000921, 0x00000002);
+ nv_icmd(dev, 0x00000922, 0x00000002);
+ nv_icmd(dev, 0x00000923, 0x00000002);
+ nv_icmd(dev, 0x00000924, 0x00000002);
+ nv_icmd(dev, 0x00000925, 0x00000002);
+ nv_icmd(dev, 0x00000926, 0x00000002);
+ nv_icmd(dev, 0x00000927, 0x00000002);
+ nv_icmd(dev, 0x00000928, 0x00000001);
+ nv_icmd(dev, 0x00000929, 0x00000001);
+ nv_icmd(dev, 0x0000092a, 0x00000001);
+ nv_icmd(dev, 0x0000092b, 0x00000001);
+ nv_icmd(dev, 0x0000092c, 0x00000001);
+ nv_icmd(dev, 0x0000092d, 0x00000001);
+ nv_icmd(dev, 0x0000092e, 0x00000001);
+ nv_icmd(dev, 0x0000092f, 0x00000001);
+ nv_icmd(dev, 0x00000648, 0x00000001);
+ nv_icmd(dev, 0x00000649, 0x00000001);
+ nv_icmd(dev, 0x0000064a, 0x00000001);
+ nv_icmd(dev, 0x0000064b, 0x00000001);
+ nv_icmd(dev, 0x0000064c, 0x00000001);
+ nv_icmd(dev, 0x0000064d, 0x00000001);
+ nv_icmd(dev, 0x0000064e, 0x00000001);
+ nv_icmd(dev, 0x0000064f, 0x00000001);
+ nv_icmd(dev, 0x00000650, 0x00000001);
+ nv_icmd(dev, 0x00000658, 0x0000000f);
+ nv_icmd(dev, 0x000007ff, 0x0000000a);
+ nv_icmd(dev, 0x0000066a, 0x40000000);
+ nv_icmd(dev, 0x0000066b, 0x10000000);
+ nv_icmd(dev, 0x0000066c, 0xffff0000);
+ nv_icmd(dev, 0x0000066d, 0xffff0000);
+ nv_icmd(dev, 0x000007af, 0x00000008);
+ nv_icmd(dev, 0x000007b0, 0x00000008);
+ nv_icmd(dev, 0x000007f6, 0x00000001);
+ nv_icmd(dev, 0x000006b2, 0x00000055);
+ nv_icmd(dev, 0x000007ad, 0x00000003);
+ nv_icmd(dev, 0x00000937, 0x00000001);
+ nv_icmd(dev, 0x00000971, 0x00000008);
+ nv_icmd(dev, 0x00000972, 0x00000040);
+ nv_icmd(dev, 0x00000973, 0x0000012c);
+ nv_icmd(dev, 0x0000097c, 0x00000040);
+ nv_icmd(dev, 0x00000979, 0x00000003);
+ nv_icmd(dev, 0x00000975, 0x00000020);
+ nv_icmd(dev, 0x00000976, 0x00000001);
+ nv_icmd(dev, 0x00000977, 0x00000020);
+ nv_icmd(dev, 0x00000978, 0x00000001);
+ nv_icmd(dev, 0x00000957, 0x00000003);
+ nv_icmd(dev, 0x0000095e, 0x20164010);
+ nv_icmd(dev, 0x0000095f, 0x00000020);
+ if (dev_priv->chipset == 0xd9)
+ nv_icmd(dev, 0x0000097d, 0x00000020);
+ nv_icmd(dev, 0x00000683, 0x00000006);
+ nv_icmd(dev, 0x00000685, 0x003fffff);
+ nv_icmd(dev, 0x00000687, 0x00000c48);
+ nv_icmd(dev, 0x000006a0, 0x00000005);
+ nv_icmd(dev, 0x00000840, 0x00300008);
+ nv_icmd(dev, 0x00000841, 0x04000080);
+ nv_icmd(dev, 0x00000842, 0x00300008);
+ nv_icmd(dev, 0x00000843, 0x04000080);
+ nv_icmd(dev, 0x00000818, 0x00000000);
+ nv_icmd(dev, 0x00000819, 0x00000000);
+ nv_icmd(dev, 0x0000081a, 0x00000000);
+ nv_icmd(dev, 0x0000081b, 0x00000000);
+ nv_icmd(dev, 0x0000081c, 0x00000000);
+ nv_icmd(dev, 0x0000081d, 0x00000000);
+ nv_icmd(dev, 0x0000081e, 0x00000000);
+ nv_icmd(dev, 0x0000081f, 0x00000000);
+ nv_icmd(dev, 0x00000848, 0x00000000);
+ nv_icmd(dev, 0x00000849, 0x00000000);
+ nv_icmd(dev, 0x0000084a, 0x00000000);
+ nv_icmd(dev, 0x0000084b, 0x00000000);
+ nv_icmd(dev, 0x0000084c, 0x00000000);
+ nv_icmd(dev, 0x0000084d, 0x00000000);
+ nv_icmd(dev, 0x0000084e, 0x00000000);
+ nv_icmd(dev, 0x0000084f, 0x00000000);
+ nv_icmd(dev, 0x00000850, 0x00000000);
+ nv_icmd(dev, 0x00000851, 0x00000000);
+ nv_icmd(dev, 0x00000852, 0x00000000);
+ nv_icmd(dev, 0x00000853, 0x00000000);
+ nv_icmd(dev, 0x00000854, 0x00000000);
+ nv_icmd(dev, 0x00000855, 0x00000000);
+ nv_icmd(dev, 0x00000856, 0x00000000);
+ nv_icmd(dev, 0x00000857, 0x00000000);
+ nv_icmd(dev, 0x00000738, 0x00000000);
+ nv_icmd(dev, 0x000006aa, 0x00000001);
+ nv_icmd(dev, 0x000006ab, 0x00000002);
+ nv_icmd(dev, 0x000006ac, 0x00000080);
+ nv_icmd(dev, 0x000006ad, 0x00000100);
+ nv_icmd(dev, 0x000006ae, 0x00000100);
+ nv_icmd(dev, 0x000006b1, 0x00000011);
+ nv_icmd(dev, 0x000006bb, 0x000000cf);
+ nv_icmd(dev, 0x000006ce, 0x2a712488);
+ nv_icmd(dev, 0x00000739, 0x4085c000);
+ nv_icmd(dev, 0x0000073a, 0x00000080);
+ nv_icmd(dev, 0x00000786, 0x80000100);
+ nv_icmd(dev, 0x0000073c, 0x00010100);
+ nv_icmd(dev, 0x0000073d, 0x02800000);
+ nv_icmd(dev, 0x00000787, 0x000000cf);
+ nv_icmd(dev, 0x0000078c, 0x00000008);
+ nv_icmd(dev, 0x00000792, 0x00000001);
+ nv_icmd(dev, 0x00000794, 0x00000001);
+ nv_icmd(dev, 0x00000795, 0x00000001);
+ nv_icmd(dev, 0x00000796, 0x00000001);
+ nv_icmd(dev, 0x00000797, 0x000000cf);
+ nv_icmd(dev, 0x00000836, 0x00000001);
+ nv_icmd(dev, 0x0000079a, 0x00000002);
+ nv_icmd(dev, 0x00000833, 0x04444480);
+ nv_icmd(dev, 0x000007a1, 0x00000001);
+ nv_icmd(dev, 0x000007a3, 0x00000001);
+ nv_icmd(dev, 0x000007a4, 0x00000001);
+ nv_icmd(dev, 0x000007a5, 0x00000001);
+ nv_icmd(dev, 0x00000831, 0x00000004);
+ nv_icmd(dev, 0x0000080c, 0x00000002);
+ nv_icmd(dev, 0x0000080d, 0x00000100);
+ nv_icmd(dev, 0x0000080e, 0x00000100);
+ nv_icmd(dev, 0x0000080f, 0x00000001);
+ nv_icmd(dev, 0x00000823, 0x00000002);
+ nv_icmd(dev, 0x00000824, 0x00000100);
+ nv_icmd(dev, 0x00000825, 0x00000100);
+ nv_icmd(dev, 0x00000826, 0x00000001);
+ nv_icmd(dev, 0x0000095d, 0x00000001);
+ nv_icmd(dev, 0x0000082b, 0x00000004);
+ nv_icmd(dev, 0x00000942, 0x00010001);
+ nv_icmd(dev, 0x00000943, 0x00000001);
+ nv_icmd(dev, 0x00000944, 0x00000022);
+ nv_icmd(dev, 0x000007c5, 0x00010001);
+ nv_icmd(dev, 0x00000834, 0x00000001);
+ nv_icmd(dev, 0x000007c7, 0x00000001);
+ nv_icmd(dev, 0x0000c1b0, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b1, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b2, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b3, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b4, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b5, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b6, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b7, 0x0000000f);
+ nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
+ nv_icmd(dev, 0x0000c1b9, 0x00fac688);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000002);
+ nv_icmd(dev, 0x000006aa, 0x00000001);
+ nv_icmd(dev, 0x000006ad, 0x00000100);
+ nv_icmd(dev, 0x000006ae, 0x00000100);
+ nv_icmd(dev, 0x000006b1, 0x00000011);
+ nv_icmd(dev, 0x0000078c, 0x00000008);
+ nv_icmd(dev, 0x00000792, 0x00000001);
+ nv_icmd(dev, 0x00000794, 0x00000001);
+ nv_icmd(dev, 0x00000795, 0x00000001);
+ nv_icmd(dev, 0x00000796, 0x00000001);
+ nv_icmd(dev, 0x00000797, 0x000000cf);
+ nv_icmd(dev, 0x0000079a, 0x00000002);
+ nv_icmd(dev, 0x00000833, 0x04444480);
+ nv_icmd(dev, 0x000007a1, 0x00000001);
+ nv_icmd(dev, 0x000007a3, 0x00000001);
+ nv_icmd(dev, 0x000007a4, 0x00000001);
+ nv_icmd(dev, 0x000007a5, 0x00000001);
+ nv_icmd(dev, 0x00000831, 0x00000004);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000014);
+ nv_icmd(dev, 0x00000351, 0x00000100);
+ nv_icmd(dev, 0x00000957, 0x00000003);
+ nv_icmd(dev, 0x0000095d, 0x00000001);
+ nv_icmd(dev, 0x0000082b, 0x00000004);
+ nv_icmd(dev, 0x00000942, 0x00010001);
+ nv_icmd(dev, 0x00000943, 0x00000001);
+ nv_icmd(dev, 0x000007c5, 0x00010001);
+ nv_icmd(dev, 0x00000834, 0x00000001);
+ nv_icmd(dev, 0x000007c7, 0x00000001);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_icmd(dev, 0x00001000, 0x00000001);
+ nv_icmd(dev, 0x0000080c, 0x00000002);
+ nv_icmd(dev, 0x0000080d, 0x00000100);
+ nv_icmd(dev, 0x0000080e, 0x00000100);
+ nv_icmd(dev, 0x0000080f, 0x00000001);
+ nv_icmd(dev, 0x00000823, 0x00000002);
+ nv_icmd(dev, 0x00000824, 0x00000100);
+ nv_icmd(dev, 0x00000825, 0x00000100);
+ nv_icmd(dev, 0x00000826, 0x00000001);
+ nv_icmd(dev, 0x0001e100, 0x00000001);
+ nv_wr32(dev, 0x400208, 0x00000000);
+ nv_wr32(dev, 0x404154, 0x00000400);
+
+ nvc0_grctx_generate_9097(dev);
+ if (fermi >= 0x9197)
+ nvc0_grctx_generate_9197(dev);
+ if (fermi >= 0x9297)
+ nvc0_grctx_generate_9297(dev);
+ nvc0_grctx_generate_902d(dev);
+ nvc0_grctx_generate_9039(dev);
+ nvc0_grctx_generate_90c0(dev);
+
+ nv_wr32(dev, 0x000260, r000260);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <core/mm.h>
+#include "nve0.h"
+
+static void
+nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
+{
+ nv_wr32(dev, 0x400204, data);
+ nv_wr32(dev, 0x400200, icmd);
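+ /* busy-wait until bit 1 of 0x400700 clears before queueing the next command */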
+ while (nv_rd32(dev, 0x400700) & 0x00000002) {}
+}
+
+static void
+nve0_grctx_generate_icmd(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400208, 0x80000000);
+ nv_icmd(dev, 0x001000, 0x00000004);
+ nv_icmd(dev, 0x000039, 0x00000000);
+ nv_icmd(dev, 0x00003a, 0x00000000);
+ nv_icmd(dev, 0x00003b, 0x00000000);
+ nv_icmd(dev, 0x0000a9, 0x0000ffff);
+ nv_icmd(dev, 0x000038, 0x0fac6881);
+ nv_icmd(dev, 0x00003d, 0x00000001);
+ nv_icmd(dev, 0x0000e8, 0x00000400);
+ nv_icmd(dev, 0x0000e9, 0x00000400);
+ nv_icmd(dev, 0x0000ea, 0x00000400);
+ nv_icmd(dev, 0x0000eb, 0x00000400);
+ nv_icmd(dev, 0x0000ec, 0x00000400);
+ nv_icmd(dev, 0x0000ed, 0x00000400);
+ nv_icmd(dev, 0x0000ee, 0x00000400);
+ nv_icmd(dev, 0x0000ef, 0x00000400);
+ nv_icmd(dev, 0x000078, 0x00000300);
+ nv_icmd(dev, 0x000079, 0x00000300);
+ nv_icmd(dev, 0x00007a, 0x00000300);
+ nv_icmd(dev, 0x00007b, 0x00000300);
+ nv_icmd(dev, 0x00007c, 0x00000300);
+ nv_icmd(dev, 0x00007d, 0x00000300);
+ nv_icmd(dev, 0x00007e, 0x00000300);
+ nv_icmd(dev, 0x00007f, 0x00000300);
+ nv_icmd(dev, 0x000050, 0x00000011);
+ nv_icmd(dev, 0x000058, 0x00000008);
+ nv_icmd(dev, 0x000059, 0x00000008);
+ nv_icmd(dev, 0x00005a, 0x00000008);
+ nv_icmd(dev, 0x00005b, 0x00000008);
+ nv_icmd(dev, 0x00005c, 0x00000008);
+ nv_icmd(dev, 0x00005d, 0x00000008);
+ nv_icmd(dev, 0x00005e, 0x00000008);
+ nv_icmd(dev, 0x00005f, 0x00000008);
+ nv_icmd(dev, 0x000208, 0x00000001);
+ nv_icmd(dev, 0x000209, 0x00000001);
+ nv_icmd(dev, 0x00020a, 0x00000001);
+ nv_icmd(dev, 0x00020b, 0x00000001);
+ nv_icmd(dev, 0x00020c, 0x00000001);
+ nv_icmd(dev, 0x00020d, 0x00000001);
+ nv_icmd(dev, 0x00020e, 0x00000001);
+ nv_icmd(dev, 0x00020f, 0x00000001);
+ nv_icmd(dev, 0x000081, 0x00000001);
+ nv_icmd(dev, 0x000085, 0x00000004);
+ nv_icmd(dev, 0x000088, 0x00000400);
+ nv_icmd(dev, 0x000090, 0x00000300);
+ nv_icmd(dev, 0x000098, 0x00001001);
+ nv_icmd(dev, 0x0000e3, 0x00000001);
+ nv_icmd(dev, 0x0000da, 0x00000001);
+ nv_icmd(dev, 0x0000f8, 0x00000003);
+ nv_icmd(dev, 0x0000fa, 0x00000001);
+ nv_icmd(dev, 0x00009f, 0x0000ffff);
+ nv_icmd(dev, 0x0000a0, 0x0000ffff);
+ nv_icmd(dev, 0x0000a1, 0x0000ffff);
+ nv_icmd(dev, 0x0000a2, 0x0000ffff);
+ nv_icmd(dev, 0x0000b1, 0x00000001);
+ nv_icmd(dev, 0x0000ad, 0x0000013e);
+ nv_icmd(dev, 0x0000e1, 0x00000010);
+ nv_icmd(dev, 0x000290, 0x00000000);
+ nv_icmd(dev, 0x000291, 0x00000000);
+ nv_icmd(dev, 0x000292, 0x00000000);
+ nv_icmd(dev, 0x000293, 0x00000000);
+ nv_icmd(dev, 0x000294, 0x00000000);
+ nv_icmd(dev, 0x000295, 0x00000000);
+ nv_icmd(dev, 0x000296, 0x00000000);
+ nv_icmd(dev, 0x000297, 0x00000000);
+ nv_icmd(dev, 0x000298, 0x00000000);
+ nv_icmd(dev, 0x000299, 0x00000000);
+ nv_icmd(dev, 0x00029a, 0x00000000);
+ nv_icmd(dev, 0x00029b, 0x00000000);
+ nv_icmd(dev, 0x00029c, 0x00000000);
+ nv_icmd(dev, 0x00029d, 0x00000000);
+ nv_icmd(dev, 0x00029e, 0x00000000);
+ nv_icmd(dev, 0x00029f, 0x00000000);
+ nv_icmd(dev, 0x0003b0, 0x00000000);
+ nv_icmd(dev, 0x0003b1, 0x00000000);
+ nv_icmd(dev, 0x0003b2, 0x00000000);
+ nv_icmd(dev, 0x0003b3, 0x00000000);
+ nv_icmd(dev, 0x0003b4, 0x00000000);
+ nv_icmd(dev, 0x0003b5, 0x00000000);
+ nv_icmd(dev, 0x0003b6, 0x00000000);
+ nv_icmd(dev, 0x0003b7, 0x00000000);
+ nv_icmd(dev, 0x0003b8, 0x00000000);
+ nv_icmd(dev, 0x0003b9, 0x00000000);
+ nv_icmd(dev, 0x0003ba, 0x00000000);
+ nv_icmd(dev, 0x0003bb, 0x00000000);
+ nv_icmd(dev, 0x0003bc, 0x00000000);
+ nv_icmd(dev, 0x0003bd, 0x00000000);
+ nv_icmd(dev, 0x0003be, 0x00000000);
+ nv_icmd(dev, 0x0003bf, 0x00000000);
+ nv_icmd(dev, 0x0002a0, 0x00000000);
+ nv_icmd(dev, 0x0002a1, 0x00000000);
+ nv_icmd(dev, 0x0002a2, 0x00000000);
+ nv_icmd(dev, 0x0002a3, 0x00000000);
+ nv_icmd(dev, 0x0002a4, 0x00000000);
+ nv_icmd(dev, 0x0002a5, 0x00000000);
+ nv_icmd(dev, 0x0002a6, 0x00000000);
+ nv_icmd(dev, 0x0002a7, 0x00000000);
+ nv_icmd(dev, 0x0002a8, 0x00000000);
+ nv_icmd(dev, 0x0002a9, 0x00000000);
+ nv_icmd(dev, 0x0002aa, 0x00000000);
+ nv_icmd(dev, 0x0002ab, 0x00000000);
+ nv_icmd(dev, 0x0002ac, 0x00000000);
+ nv_icmd(dev, 0x0002ad, 0x00000000);
+ nv_icmd(dev, 0x0002ae, 0x00000000);
+ nv_icmd(dev, 0x0002af, 0x00000000);
+ nv_icmd(dev, 0x000420, 0x00000000);
+ nv_icmd(dev, 0x000421, 0x00000000);
+ nv_icmd(dev, 0x000422, 0x00000000);
+ nv_icmd(dev, 0x000423, 0x00000000);
+ nv_icmd(dev, 0x000424, 0x00000000);
+ nv_icmd(dev, 0x000425, 0x00000000);
+ nv_icmd(dev, 0x000426, 0x00000000);
+ nv_icmd(dev, 0x000427, 0x00000000);
+ nv_icmd(dev, 0x000428, 0x00000000);
+ nv_icmd(dev, 0x000429, 0x00000000);
+ nv_icmd(dev, 0x00042a, 0x00000000);
+ nv_icmd(dev, 0x00042b, 0x00000000);
+ nv_icmd(dev, 0x00042c, 0x00000000);
+ nv_icmd(dev, 0x00042d, 0x00000000);
+ nv_icmd(dev, 0x00042e, 0x00000000);
+ nv_icmd(dev, 0x00042f, 0x00000000);
+ nv_icmd(dev, 0x0002b0, 0x00000000);
+ nv_icmd(dev, 0x0002b1, 0x00000000);
+ nv_icmd(dev, 0x0002b2, 0x00000000);
+ nv_icmd(dev, 0x0002b3, 0x00000000);
+ nv_icmd(dev, 0x0002b4, 0x00000000);
+ nv_icmd(dev, 0x0002b5, 0x00000000);
+ nv_icmd(dev, 0x0002b6, 0x00000000);
+ nv_icmd(dev, 0x0002b7, 0x00000000);
+ nv_icmd(dev, 0x0002b8, 0x00000000);
+ nv_icmd(dev, 0x0002b9, 0x00000000);
+ nv_icmd(dev, 0x0002ba, 0x00000000);
+ nv_icmd(dev, 0x0002bb, 0x00000000);
+ nv_icmd(dev, 0x0002bc, 0x00000000);
+ nv_icmd(dev, 0x0002bd, 0x00000000);
+ nv_icmd(dev, 0x0002be, 0x00000000);
+ nv_icmd(dev, 0x0002bf, 0x00000000);
+ nv_icmd(dev, 0x000430, 0x00000000);
+ nv_icmd(dev, 0x000431, 0x00000000);
+ nv_icmd(dev, 0x000432, 0x00000000);
+ nv_icmd(dev, 0x000433, 0x00000000);
+ nv_icmd(dev, 0x000434, 0x00000000);
+ nv_icmd(dev, 0x000435, 0x00000000);
+ nv_icmd(dev, 0x000436, 0x00000000);
+ nv_icmd(dev, 0x000437, 0x00000000);
+ nv_icmd(dev, 0x000438, 0x00000000);
+ nv_icmd(dev, 0x000439, 0x00000000);
+ nv_icmd(dev, 0x00043a, 0x00000000);
+ nv_icmd(dev, 0x00043b, 0x00000000);
+ nv_icmd(dev, 0x00043c, 0x00000000);
+ nv_icmd(dev, 0x00043d, 0x00000000);
+ nv_icmd(dev, 0x00043e, 0x00000000);
+ nv_icmd(dev, 0x00043f, 0x00000000);
+ nv_icmd(dev, 0x0002c0, 0x00000000);
+ nv_icmd(dev, 0x0002c1, 0x00000000);
+ nv_icmd(dev, 0x0002c2, 0x00000000);
+ nv_icmd(dev, 0x0002c3, 0x00000000);
+ nv_icmd(dev, 0x0002c4, 0x00000000);
+ nv_icmd(dev, 0x0002c5, 0x00000000);
+ nv_icmd(dev, 0x0002c6, 0x00000000);
+ nv_icmd(dev, 0x0002c7, 0x00000000);
+ nv_icmd(dev, 0x0002c8, 0x00000000);
+ nv_icmd(dev, 0x0002c9, 0x00000000);
+ nv_icmd(dev, 0x0002ca, 0x00000000);
+ nv_icmd(dev, 0x0002cb, 0x00000000);
+ nv_icmd(dev, 0x0002cc, 0x00000000);
+ nv_icmd(dev, 0x0002cd, 0x00000000);
+ nv_icmd(dev, 0x0002ce, 0x00000000);
+ nv_icmd(dev, 0x0002cf, 0x00000000);
+ nv_icmd(dev, 0x0004d0, 0x00000000);
+ nv_icmd(dev, 0x0004d1, 0x00000000);
+ nv_icmd(dev, 0x0004d2, 0x00000000);
+ nv_icmd(dev, 0x0004d3, 0x00000000);
+ nv_icmd(dev, 0x0004d4, 0x00000000);
+ nv_icmd(dev, 0x0004d5, 0x00000000);
+ nv_icmd(dev, 0x0004d6, 0x00000000);
+ nv_icmd(dev, 0x0004d7, 0x00000000);
+ nv_icmd(dev, 0x0004d8, 0x00000000);
+ nv_icmd(dev, 0x0004d9, 0x00000000);
+ nv_icmd(dev, 0x0004da, 0x00000000);
+ nv_icmd(dev, 0x0004db, 0x00000000);
+ nv_icmd(dev, 0x0004dc, 0x00000000);
+ nv_icmd(dev, 0x0004dd, 0x00000000);
+ nv_icmd(dev, 0x0004de, 0x00000000);
+ nv_icmd(dev, 0x0004df, 0x00000000);
+ nv_icmd(dev, 0x000720, 0x00000000);
+ nv_icmd(dev, 0x000721, 0x00000000);
+ nv_icmd(dev, 0x000722, 0x00000000);
+ nv_icmd(dev, 0x000723, 0x00000000);
+ nv_icmd(dev, 0x000724, 0x00000000);
+ nv_icmd(dev, 0x000725, 0x00000000);
+ nv_icmd(dev, 0x000726, 0x00000000);
+ nv_icmd(dev, 0x000727, 0x00000000);
+ nv_icmd(dev, 0x000728, 0x00000000);
+ nv_icmd(dev, 0x000729, 0x00000000);
+ nv_icmd(dev, 0x00072a, 0x00000000);
+ nv_icmd(dev, 0x00072b, 0x00000000);
+ nv_icmd(dev, 0x00072c, 0x00000000);
+ nv_icmd(dev, 0x00072d, 0x00000000);
+ nv_icmd(dev, 0x00072e, 0x00000000);
+ nv_icmd(dev, 0x00072f, 0x00000000);
+ nv_icmd(dev, 0x0008c0, 0x00000000);
+ nv_icmd(dev, 0x0008c1, 0x00000000);
+ nv_icmd(dev, 0x0008c2, 0x00000000);
+ nv_icmd(dev, 0x0008c3, 0x00000000);
+ nv_icmd(dev, 0x0008c4, 0x00000000);
+ nv_icmd(dev, 0x0008c5, 0x00000000);
+ nv_icmd(dev, 0x0008c6, 0x00000000);
+ nv_icmd(dev, 0x0008c7, 0x00000000);
+ nv_icmd(dev, 0x0008c8, 0x00000000);
+ nv_icmd(dev, 0x0008c9, 0x00000000);
+ nv_icmd(dev, 0x0008ca, 0x00000000);
+ nv_icmd(dev, 0x0008cb, 0x00000000);
+ nv_icmd(dev, 0x0008cc, 0x00000000);
+ nv_icmd(dev, 0x0008cd, 0x00000000);
+ nv_icmd(dev, 0x0008ce, 0x00000000);
+ nv_icmd(dev, 0x0008cf, 0x00000000);
+ nv_icmd(dev, 0x000890, 0x00000000);
+ nv_icmd(dev, 0x000891, 0x00000000);
+ nv_icmd(dev, 0x000892, 0x00000000);
+ nv_icmd(dev, 0x000893, 0x00000000);
+ nv_icmd(dev, 0x000894, 0x00000000);
+ nv_icmd(dev, 0x000895, 0x00000000);
+ nv_icmd(dev, 0x000896, 0x00000000);
+ nv_icmd(dev, 0x000897, 0x00000000);
+ nv_icmd(dev, 0x000898, 0x00000000);
+ nv_icmd(dev, 0x000899, 0x00000000);
+ nv_icmd(dev, 0x00089a, 0x00000000);
+ nv_icmd(dev, 0x00089b, 0x00000000);
+ nv_icmd(dev, 0x00089c, 0x00000000);
+ nv_icmd(dev, 0x00089d, 0x00000000);
+ nv_icmd(dev, 0x00089e, 0x00000000);
+ nv_icmd(dev, 0x00089f, 0x00000000);
+ nv_icmd(dev, 0x0008e0, 0x00000000);
+ nv_icmd(dev, 0x0008e1, 0x00000000);
+ nv_icmd(dev, 0x0008e2, 0x00000000);
+ nv_icmd(dev, 0x0008e3, 0x00000000);
+ nv_icmd(dev, 0x0008e4, 0x00000000);
+ nv_icmd(dev, 0x0008e5, 0x00000000);
+ nv_icmd(dev, 0x0008e6, 0x00000000);
+ nv_icmd(dev, 0x0008e7, 0x00000000);
+ nv_icmd(dev, 0x0008e8, 0x00000000);
+ nv_icmd(dev, 0x0008e9, 0x00000000);
+ nv_icmd(dev, 0x0008ea, 0x00000000);
+ nv_icmd(dev, 0x0008eb, 0x00000000);
+ nv_icmd(dev, 0x0008ec, 0x00000000);
+ nv_icmd(dev, 0x0008ed, 0x00000000);
+ nv_icmd(dev, 0x0008ee, 0x00000000);
+ nv_icmd(dev, 0x0008ef, 0x00000000);
+ nv_icmd(dev, 0x0008a0, 0x00000000);
+ nv_icmd(dev, 0x0008a1, 0x00000000);
+ nv_icmd(dev, 0x0008a2, 0x00000000);
+ nv_icmd(dev, 0x0008a3, 0x00000000);
+ nv_icmd(dev, 0x0008a4, 0x00000000);
+ nv_icmd(dev, 0x0008a5, 0x00000000);
+ nv_icmd(dev, 0x0008a6, 0x00000000);
+ nv_icmd(dev, 0x0008a7, 0x00000000);
+ nv_icmd(dev, 0x0008a8, 0x00000000);
+ nv_icmd(dev, 0x0008a9, 0x00000000);
+ nv_icmd(dev, 0x0008aa, 0x00000000);
+ nv_icmd(dev, 0x0008ab, 0x00000000);
+ nv_icmd(dev, 0x0008ac, 0x00000000);
+ nv_icmd(dev, 0x0008ad, 0x00000000);
+ nv_icmd(dev, 0x0008ae, 0x00000000);
+ nv_icmd(dev, 0x0008af, 0x00000000);
+ nv_icmd(dev, 0x0008f0, 0x00000000);
+ nv_icmd(dev, 0x0008f1, 0x00000000);
+ nv_icmd(dev, 0x0008f2, 0x00000000);
+ nv_icmd(dev, 0x0008f3, 0x00000000);
+ nv_icmd(dev, 0x0008f4, 0x00000000);
+ nv_icmd(dev, 0x0008f5, 0x00000000);
+ nv_icmd(dev, 0x0008f6, 0x00000000);
+ nv_icmd(dev, 0x0008f7, 0x00000000);
+ nv_icmd(dev, 0x0008f8, 0x00000000);
+ nv_icmd(dev, 0x0008f9, 0x00000000);
+ nv_icmd(dev, 0x0008fa, 0x00000000);
+ nv_icmd(dev, 0x0008fb, 0x00000000);
+ nv_icmd(dev, 0x0008fc, 0x00000000);
+ nv_icmd(dev, 0x0008fd, 0x00000000);
+ nv_icmd(dev, 0x0008fe, 0x00000000);
+ nv_icmd(dev, 0x0008ff, 0x00000000);
+ nv_icmd(dev, 0x00094c, 0x000000ff);
+ nv_icmd(dev, 0x00094d, 0xffffffff);
+ nv_icmd(dev, 0x00094e, 0x00000002);
+ nv_icmd(dev, 0x0002ec, 0x00000001);
+ nv_icmd(dev, 0x000303, 0x00000001);
+ nv_icmd(dev, 0x0002e6, 0x00000001);
+ nv_icmd(dev, 0x000466, 0x00000052);
+ nv_icmd(dev, 0x000301, 0x3f800000);
+ nv_icmd(dev, 0x000304, 0x30201000);
+ nv_icmd(dev, 0x000305, 0x70605040);
+ nv_icmd(dev, 0x000306, 0xb8a89888);
+ nv_icmd(dev, 0x000307, 0xf8e8d8c8);
+ nv_icmd(dev, 0x00030a, 0x00ffff00);
+ nv_icmd(dev, 0x00030b, 0x0000001a);
+ nv_icmd(dev, 0x00030c, 0x00000001);
+ nv_icmd(dev, 0x000318, 0x00000001);
+ nv_icmd(dev, 0x000340, 0x00000000);
+ nv_icmd(dev, 0x000375, 0x00000001);
+ nv_icmd(dev, 0x00037d, 0x00000006);
+ nv_icmd(dev, 0x0003a0, 0x00000002);
+ nv_icmd(dev, 0x0003aa, 0x00000001);
+ nv_icmd(dev, 0x0003a9, 0x00000001);
+ nv_icmd(dev, 0x000380, 0x00000001);
+ nv_icmd(dev, 0x000383, 0x00000011);
+ nv_icmd(dev, 0x000360, 0x00000040);
+ nv_icmd(dev, 0x000366, 0x00000000);
+ nv_icmd(dev, 0x000367, 0x00000000);
+ nv_icmd(dev, 0x000368, 0x00000fff);
+ nv_icmd(dev, 0x000370, 0x00000000);
+ nv_icmd(dev, 0x000371, 0x00000000);
+ nv_icmd(dev, 0x000372, 0x000fffff);
+ nv_icmd(dev, 0x00037a, 0x00000012);
+ nv_icmd(dev, 0x000619, 0x00000003);
+ nv_icmd(dev, 0x000811, 0x00000003);
+ nv_icmd(dev, 0x000812, 0x00000004);
+ nv_icmd(dev, 0x000813, 0x00000006);
+ nv_icmd(dev, 0x000814, 0x00000008);
+ nv_icmd(dev, 0x000815, 0x0000000b);
+ nv_icmd(dev, 0x000800, 0x00000001);
+ nv_icmd(dev, 0x000801, 0x00000001);
+ nv_icmd(dev, 0x000802, 0x00000001);
+ nv_icmd(dev, 0x000803, 0x00000001);
+ nv_icmd(dev, 0x000804, 0x00000001);
+ nv_icmd(dev, 0x000805, 0x00000001);
+ nv_icmd(dev, 0x000632, 0x00000001);
+ nv_icmd(dev, 0x000633, 0x00000002);
+ nv_icmd(dev, 0x000634, 0x00000003);
+ nv_icmd(dev, 0x000635, 0x00000004);
+ nv_icmd(dev, 0x000654, 0x3f800000);
+ nv_icmd(dev, 0x000657, 0x3f800000);
+ nv_icmd(dev, 0x000655, 0x3f800000);
+ nv_icmd(dev, 0x000656, 0x3f800000);
+ nv_icmd(dev, 0x0006cd, 0x3f800000);
+ nv_icmd(dev, 0x0007f5, 0x3f800000);
+ nv_icmd(dev, 0x0007dc, 0x39291909);
+ nv_icmd(dev, 0x0007dd, 0x79695949);
+ nv_icmd(dev, 0x0007de, 0xb9a99989);
+ nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
+ nv_icmd(dev, 0x0007e8, 0x00003210);
+ nv_icmd(dev, 0x0007e9, 0x00007654);
+ nv_icmd(dev, 0x0007ea, 0x00000098);
+ nv_icmd(dev, 0x0007ec, 0x39291909);
+ nv_icmd(dev, 0x0007ed, 0x79695949);
+ nv_icmd(dev, 0x0007ee, 0xb9a99989);
+ nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
+ nv_icmd(dev, 0x0007f0, 0x00003210);
+ nv_icmd(dev, 0x0007f1, 0x00007654);
+ nv_icmd(dev, 0x0007f2, 0x00000098);
+ nv_icmd(dev, 0x0005a5, 0x00000001);
+ nv_icmd(dev, 0x000980, 0x00000000);
+ nv_icmd(dev, 0x000981, 0x00000000);
+ nv_icmd(dev, 0x000982, 0x00000000);
+ nv_icmd(dev, 0x000983, 0x00000000);
+ nv_icmd(dev, 0x000984, 0x00000000);
+ nv_icmd(dev, 0x000985, 0x00000000);
+ nv_icmd(dev, 0x000986, 0x00000000);
+ nv_icmd(dev, 0x000987, 0x00000000);
+ nv_icmd(dev, 0x000988, 0x00000000);
+ nv_icmd(dev, 0x000989, 0x00000000);
+ nv_icmd(dev, 0x00098a, 0x00000000);
+ nv_icmd(dev, 0x00098b, 0x00000000);
+ nv_icmd(dev, 0x00098c, 0x00000000);
+ nv_icmd(dev, 0x00098d, 0x00000000);
+ nv_icmd(dev, 0x00098e, 0x00000000);
+ nv_icmd(dev, 0x00098f, 0x00000000);
+ nv_icmd(dev, 0x000990, 0x00000000);
+ nv_icmd(dev, 0x000991, 0x00000000);
+ nv_icmd(dev, 0x000992, 0x00000000);
+ nv_icmd(dev, 0x000993, 0x00000000);
+ nv_icmd(dev, 0x000994, 0x00000000);
+ nv_icmd(dev, 0x000995, 0x00000000);
+ nv_icmd(dev, 0x000996, 0x00000000);
+ nv_icmd(dev, 0x000997, 0x00000000);
+ nv_icmd(dev, 0x000998, 0x00000000);
+ nv_icmd(dev, 0x000999, 0x00000000);
+ nv_icmd(dev, 0x00099a, 0x00000000);
+ nv_icmd(dev, 0x00099b, 0x00000000);
+ nv_icmd(dev, 0x00099c, 0x00000000);
+ nv_icmd(dev, 0x00099d, 0x00000000);
+ nv_icmd(dev, 0x00099e, 0x00000000);
+ nv_icmd(dev, 0x00099f, 0x00000000);
+ nv_icmd(dev, 0x0009a0, 0x00000000);
+ nv_icmd(dev, 0x0009a1, 0x00000000);
+ nv_icmd(dev, 0x0009a2, 0x00000000);
+ nv_icmd(dev, 0x0009a3, 0x00000000);
+ nv_icmd(dev, 0x0009a4, 0x00000000);
+ nv_icmd(dev, 0x0009a5, 0x00000000);
+ nv_icmd(dev, 0x0009a6, 0x00000000);
+ nv_icmd(dev, 0x0009a7, 0x00000000);
+ nv_icmd(dev, 0x0009a8, 0x00000000);
+ nv_icmd(dev, 0x0009a9, 0x00000000);
+ nv_icmd(dev, 0x0009aa, 0x00000000);
+ nv_icmd(dev, 0x0009ab, 0x00000000);
+ nv_icmd(dev, 0x0009ac, 0x00000000);
+ nv_icmd(dev, 0x0009ad, 0x00000000);
+ nv_icmd(dev, 0x0009ae, 0x00000000);
+ nv_icmd(dev, 0x0009af, 0x00000000);
+ nv_icmd(dev, 0x0009b0, 0x00000000);
+ nv_icmd(dev, 0x0009b1, 0x00000000);
+ nv_icmd(dev, 0x0009b2, 0x00000000);
+ nv_icmd(dev, 0x0009b3, 0x00000000);
+ nv_icmd(dev, 0x0009b4, 0x00000000);
+ nv_icmd(dev, 0x0009b5, 0x00000000);
+ nv_icmd(dev, 0x0009b6, 0x00000000);
+ nv_icmd(dev, 0x0009b7, 0x00000000);
+ nv_icmd(dev, 0x0009b8, 0x00000000);
+ nv_icmd(dev, 0x0009b9, 0x00000000);
+ nv_icmd(dev, 0x0009ba, 0x00000000);
+ nv_icmd(dev, 0x0009bb, 0x00000000);
+ nv_icmd(dev, 0x0009bc, 0x00000000);
+ nv_icmd(dev, 0x0009bd, 0x00000000);
+ nv_icmd(dev, 0x0009be, 0x00000000);
+ nv_icmd(dev, 0x0009bf, 0x00000000);
+ nv_icmd(dev, 0x0009c0, 0x00000000);
+ nv_icmd(dev, 0x0009c1, 0x00000000);
+ nv_icmd(dev, 0x0009c2, 0x00000000);
+ nv_icmd(dev, 0x0009c3, 0x00000000);
+ nv_icmd(dev, 0x0009c4, 0x00000000);
+ nv_icmd(dev, 0x0009c5, 0x00000000);
+ nv_icmd(dev, 0x0009c6, 0x00000000);
+ nv_icmd(dev, 0x0009c7, 0x00000000);
+ nv_icmd(dev, 0x0009c8, 0x00000000);
+ nv_icmd(dev, 0x0009c9, 0x00000000);
+ nv_icmd(dev, 0x0009ca, 0x00000000);
+ nv_icmd(dev, 0x0009cb, 0x00000000);
+ nv_icmd(dev, 0x0009cc, 0x00000000);
+ nv_icmd(dev, 0x0009cd, 0x00000000);
+ nv_icmd(dev, 0x0009ce, 0x00000000);
+ nv_icmd(dev, 0x0009cf, 0x00000000);
+ nv_icmd(dev, 0x0009d0, 0x00000000);
+ nv_icmd(dev, 0x0009d1, 0x00000000);
+ nv_icmd(dev, 0x0009d2, 0x00000000);
+ nv_icmd(dev, 0x0009d3, 0x00000000);
+ nv_icmd(dev, 0x0009d4, 0x00000000);
+ nv_icmd(dev, 0x0009d5, 0x00000000);
+ nv_icmd(dev, 0x0009d6, 0x00000000);
+ nv_icmd(dev, 0x0009d7, 0x00000000);
+ nv_icmd(dev, 0x0009d8, 0x00000000);
+ nv_icmd(dev, 0x0009d9, 0x00000000);
+ nv_icmd(dev, 0x0009da, 0x00000000);
+ nv_icmd(dev, 0x0009db, 0x00000000);
+ nv_icmd(dev, 0x0009dc, 0x00000000);
+ nv_icmd(dev, 0x0009dd, 0x00000000);
+ nv_icmd(dev, 0x0009de, 0x00000000);
+ nv_icmd(dev, 0x0009df, 0x00000000);
+ nv_icmd(dev, 0x0009e0, 0x00000000);
+ nv_icmd(dev, 0x0009e1, 0x00000000);
+ nv_icmd(dev, 0x0009e2, 0x00000000);
+ nv_icmd(dev, 0x0009e3, 0x00000000);
+ nv_icmd(dev, 0x0009e4, 0x00000000);
+ nv_icmd(dev, 0x0009e5, 0x00000000);
+ nv_icmd(dev, 0x0009e6, 0x00000000);
+ nv_icmd(dev, 0x0009e7, 0x00000000);
+ nv_icmd(dev, 0x0009e8, 0x00000000);
+ nv_icmd(dev, 0x0009e9, 0x00000000);
+ nv_icmd(dev, 0x0009ea, 0x00000000);
+ nv_icmd(dev, 0x0009eb, 0x00000000);
+ nv_icmd(dev, 0x0009ec, 0x00000000);
+ nv_icmd(dev, 0x0009ed, 0x00000000);
+ nv_icmd(dev, 0x0009ee, 0x00000000);
+ nv_icmd(dev, 0x0009ef, 0x00000000);
+ nv_icmd(dev, 0x0009f0, 0x00000000);
+ nv_icmd(dev, 0x0009f1, 0x00000000);
+ nv_icmd(dev, 0x0009f2, 0x00000000);
+ nv_icmd(dev, 0x0009f3, 0x00000000);
+ nv_icmd(dev, 0x0009f4, 0x00000000);
+ nv_icmd(dev, 0x0009f5, 0x00000000);
+ nv_icmd(dev, 0x0009f6, 0x00000000);
+ nv_icmd(dev, 0x0009f7, 0x00000000);
+ nv_icmd(dev, 0x0009f8, 0x00000000);
+ nv_icmd(dev, 0x0009f9, 0x00000000);
+ nv_icmd(dev, 0x0009fa, 0x00000000);
+ nv_icmd(dev, 0x0009fb, 0x00000000);
+ nv_icmd(dev, 0x0009fc, 0x00000000);
+ nv_icmd(dev, 0x0009fd, 0x00000000);
+ nv_icmd(dev, 0x0009fe, 0x00000000);
+ nv_icmd(dev, 0x0009ff, 0x00000000);
+ nv_icmd(dev, 0x000468, 0x00000004);
+ nv_icmd(dev, 0x00046c, 0x00000001);
+ nv_icmd(dev, 0x000470, 0x00000000);
+ nv_icmd(dev, 0x000471, 0x00000000);
+ nv_icmd(dev, 0x000472, 0x00000000);
+ nv_icmd(dev, 0x000473, 0x00000000);
+ nv_icmd(dev, 0x000474, 0x00000000);
+ nv_icmd(dev, 0x000475, 0x00000000);
+ nv_icmd(dev, 0x000476, 0x00000000);
+ nv_icmd(dev, 0x000477, 0x00000000);
+ nv_icmd(dev, 0x000478, 0x00000000);
+ nv_icmd(dev, 0x000479, 0x00000000);
+ nv_icmd(dev, 0x00047a, 0x00000000);
+ nv_icmd(dev, 0x00047b, 0x00000000);
+ nv_icmd(dev, 0x00047c, 0x00000000);
+ nv_icmd(dev, 0x00047d, 0x00000000);
+ nv_icmd(dev, 0x00047e, 0x00000000);
+ nv_icmd(dev, 0x00047f, 0x00000000);
+ nv_icmd(dev, 0x000480, 0x00000000);
+ nv_icmd(dev, 0x000481, 0x00000000);
+ nv_icmd(dev, 0x000482, 0x00000000);
+ nv_icmd(dev, 0x000483, 0x00000000);
+ nv_icmd(dev, 0x000484, 0x00000000);
+ nv_icmd(dev, 0x000485, 0x00000000);
+ nv_icmd(dev, 0x000486, 0x00000000);
+ nv_icmd(dev, 0x000487, 0x00000000);
+ nv_icmd(dev, 0x000488, 0x00000000);
+ nv_icmd(dev, 0x000489, 0x00000000);
+ nv_icmd(dev, 0x00048a, 0x00000000);
+ nv_icmd(dev, 0x00048b, 0x00000000);
+ nv_icmd(dev, 0x00048c, 0x00000000);
+ nv_icmd(dev, 0x00048d, 0x00000000);
+ nv_icmd(dev, 0x00048e, 0x00000000);
+ nv_icmd(dev, 0x00048f, 0x00000000);
+ nv_icmd(dev, 0x000490, 0x00000000);
+ nv_icmd(dev, 0x000491, 0x00000000);
+ nv_icmd(dev, 0x000492, 0x00000000);
+ nv_icmd(dev, 0x000493, 0x00000000);
+ nv_icmd(dev, 0x000494, 0x00000000);
+ nv_icmd(dev, 0x000495, 0x00000000);
+ nv_icmd(dev, 0x000496, 0x00000000);
+ nv_icmd(dev, 0x000497, 0x00000000);
+ nv_icmd(dev, 0x000498, 0x00000000);
+ nv_icmd(dev, 0x000499, 0x00000000);
+ nv_icmd(dev, 0x00049a, 0x00000000);
+ nv_icmd(dev, 0x00049b, 0x00000000);
+ nv_icmd(dev, 0x00049c, 0x00000000);
+ nv_icmd(dev, 0x00049d, 0x00000000);
+ nv_icmd(dev, 0x00049e, 0x00000000);
+ nv_icmd(dev, 0x00049f, 0x00000000);
+ nv_icmd(dev, 0x0004a0, 0x00000000);
+ nv_icmd(dev, 0x0004a1, 0x00000000);
+ nv_icmd(dev, 0x0004a2, 0x00000000);
+ nv_icmd(dev, 0x0004a3, 0x00000000);
+ nv_icmd(dev, 0x0004a4, 0x00000000);
+ nv_icmd(dev, 0x0004a5, 0x00000000);
+ nv_icmd(dev, 0x0004a6, 0x00000000);
+ nv_icmd(dev, 0x0004a7, 0x00000000);
+ nv_icmd(dev, 0x0004a8, 0x00000000);
+ nv_icmd(dev, 0x0004a9, 0x00000000);
+ nv_icmd(dev, 0x0004aa, 0x00000000);
+ nv_icmd(dev, 0x0004ab, 0x00000000);
+ nv_icmd(dev, 0x0004ac, 0x00000000);
+ nv_icmd(dev, 0x0004ad, 0x00000000);
+ nv_icmd(dev, 0x0004ae, 0x00000000);
+ nv_icmd(dev, 0x0004af, 0x00000000);
+ nv_icmd(dev, 0x0004b0, 0x00000000);
+ nv_icmd(dev, 0x0004b1, 0x00000000);
+ nv_icmd(dev, 0x0004b2, 0x00000000);
+ nv_icmd(dev, 0x0004b3, 0x00000000);
+ nv_icmd(dev, 0x0004b4, 0x00000000);
+ nv_icmd(dev, 0x0004b5, 0x00000000);
+ nv_icmd(dev, 0x0004b6, 0x00000000);
+ nv_icmd(dev, 0x0004b7, 0x00000000);
+ nv_icmd(dev, 0x0004b8, 0x00000000);
+ nv_icmd(dev, 0x0004b9, 0x00000000);
+ nv_icmd(dev, 0x0004ba, 0x00000000);
+ nv_icmd(dev, 0x0004bb, 0x00000000);
+ nv_icmd(dev, 0x0004bc, 0x00000000);
+ nv_icmd(dev, 0x0004bd, 0x00000000);
+ nv_icmd(dev, 0x0004be, 0x00000000);
+ nv_icmd(dev, 0x0004bf, 0x00000000);
+ nv_icmd(dev, 0x0004c0, 0x00000000);
+ nv_icmd(dev, 0x0004c1, 0x00000000);
+ nv_icmd(dev, 0x0004c2, 0x00000000);
+ nv_icmd(dev, 0x0004c3, 0x00000000);
+ nv_icmd(dev, 0x0004c4, 0x00000000);
+ nv_icmd(dev, 0x0004c5, 0x00000000);
+ nv_icmd(dev, 0x0004c6, 0x00000000);
+ nv_icmd(dev, 0x0004c7, 0x00000000);
+ nv_icmd(dev, 0x0004c8, 0x00000000);
+ nv_icmd(dev, 0x0004c9, 0x00000000);
+ nv_icmd(dev, 0x0004ca, 0x00000000);
+ nv_icmd(dev, 0x0004cb, 0x00000000);
+ nv_icmd(dev, 0x0004cc, 0x00000000);
+ nv_icmd(dev, 0x0004cd, 0x00000000);
+ nv_icmd(dev, 0x0004ce, 0x00000000);
+ nv_icmd(dev, 0x0004cf, 0x00000000);
+ nv_icmd(dev, 0x000510, 0x3f800000);
+ nv_icmd(dev, 0x000511, 0x3f800000);
+ nv_icmd(dev, 0x000512, 0x3f800000);
+ nv_icmd(dev, 0x000513, 0x3f800000);
+ nv_icmd(dev, 0x000514, 0x3f800000);
+ nv_icmd(dev, 0x000515, 0x3f800000);
+ nv_icmd(dev, 0x000516, 0x3f800000);
+ nv_icmd(dev, 0x000517, 0x3f800000);
+ nv_icmd(dev, 0x000518, 0x3f800000);
+ nv_icmd(dev, 0x000519, 0x3f800000);
+ nv_icmd(dev, 0x00051a, 0x3f800000);
+ nv_icmd(dev, 0x00051b, 0x3f800000);
+ nv_icmd(dev, 0x00051c, 0x3f800000);
+ nv_icmd(dev, 0x00051d, 0x3f800000);
+ nv_icmd(dev, 0x00051e, 0x3f800000);
+ nv_icmd(dev, 0x00051f, 0x3f800000);
+ nv_icmd(dev, 0x000520, 0x000002b6);
+ nv_icmd(dev, 0x000529, 0x00000001);
+ nv_icmd(dev, 0x000530, 0xffff0000);
+ nv_icmd(dev, 0x000531, 0xffff0000);
+ nv_icmd(dev, 0x000532, 0xffff0000);
+ nv_icmd(dev, 0x000533, 0xffff0000);
+ nv_icmd(dev, 0x000534, 0xffff0000);
+ nv_icmd(dev, 0x000535, 0xffff0000);
+ nv_icmd(dev, 0x000536, 0xffff0000);
+ nv_icmd(dev, 0x000537, 0xffff0000);
+ nv_icmd(dev, 0x000538, 0xffff0000);
+ nv_icmd(dev, 0x000539, 0xffff0000);
+ nv_icmd(dev, 0x00053a, 0xffff0000);
+ nv_icmd(dev, 0x00053b, 0xffff0000);
+ nv_icmd(dev, 0x00053c, 0xffff0000);
+ nv_icmd(dev, 0x00053d, 0xffff0000);
+ nv_icmd(dev, 0x00053e, 0xffff0000);
+ nv_icmd(dev, 0x00053f, 0xffff0000);
+ nv_icmd(dev, 0x000585, 0x0000003f);
+ nv_icmd(dev, 0x000576, 0x00000003);
+ nv_icmd(dev, 0x00057b, 0x00000059);
+ nv_icmd(dev, 0x000586, 0x00000040);
+ nv_icmd(dev, 0x000582, 0x00000080);
+ nv_icmd(dev, 0x000583, 0x00000080);
+ nv_icmd(dev, 0x0005c2, 0x00000001);
+ nv_icmd(dev, 0x000638, 0x00000001);
+ nv_icmd(dev, 0x000639, 0x00000001);
+ nv_icmd(dev, 0x00063a, 0x00000002);
+ nv_icmd(dev, 0x00063b, 0x00000001);
+ nv_icmd(dev, 0x00063c, 0x00000001);
+ nv_icmd(dev, 0x00063d, 0x00000002);
+ nv_icmd(dev, 0x00063e, 0x00000001);
+ nv_icmd(dev, 0x0008b8, 0x00000001);
+ nv_icmd(dev, 0x0008b9, 0x00000001);
+ nv_icmd(dev, 0x0008ba, 0x00000001);
+ nv_icmd(dev, 0x0008bb, 0x00000001);
+ nv_icmd(dev, 0x0008bc, 0x00000001);
+ nv_icmd(dev, 0x0008bd, 0x00000001);
+ nv_icmd(dev, 0x0008be, 0x00000001);
+ nv_icmd(dev, 0x0008bf, 0x00000001);
+ nv_icmd(dev, 0x000900, 0x00000001);
+ nv_icmd(dev, 0x000901, 0x00000001);
+ nv_icmd(dev, 0x000902, 0x00000001);
+ nv_icmd(dev, 0x000903, 0x00000001);
+ nv_icmd(dev, 0x000904, 0x00000001);
+ nv_icmd(dev, 0x000905, 0x00000001);
+ nv_icmd(dev, 0x000906, 0x00000001);
+ nv_icmd(dev, 0x000907, 0x00000001);
+ nv_icmd(dev, 0x000908, 0x00000002);
+ nv_icmd(dev, 0x000909, 0x00000002);
+ nv_icmd(dev, 0x00090a, 0x00000002);
+ nv_icmd(dev, 0x00090b, 0x00000002);
+ nv_icmd(dev, 0x00090c, 0x00000002);
+ nv_icmd(dev, 0x00090d, 0x00000002);
+ nv_icmd(dev, 0x00090e, 0x00000002);
+ nv_icmd(dev, 0x00090f, 0x00000002);
+ nv_icmd(dev, 0x000910, 0x00000001);
+ nv_icmd(dev, 0x000911, 0x00000001);
+ nv_icmd(dev, 0x000912, 0x00000001);
+ nv_icmd(dev, 0x000913, 0x00000001);
+ nv_icmd(dev, 0x000914, 0x00000001);
+ nv_icmd(dev, 0x000915, 0x00000001);
+ nv_icmd(dev, 0x000916, 0x00000001);
+ nv_icmd(dev, 0x000917, 0x00000001);
+ nv_icmd(dev, 0x000918, 0x00000001);
+ nv_icmd(dev, 0x000919, 0x00000001);
+ nv_icmd(dev, 0x00091a, 0x00000001);
+ nv_icmd(dev, 0x00091b, 0x00000001);
+ nv_icmd(dev, 0x00091c, 0x00000001);
+ nv_icmd(dev, 0x00091d, 0x00000001);
+ nv_icmd(dev, 0x00091e, 0x00000001);
+ nv_icmd(dev, 0x00091f, 0x00000001);
+ nv_icmd(dev, 0x000920, 0x00000002);
+ nv_icmd(dev, 0x000921, 0x00000002);
+ nv_icmd(dev, 0x000922, 0x00000002);
+ nv_icmd(dev, 0x000923, 0x00000002);
+ nv_icmd(dev, 0x000924, 0x00000002);
+ nv_icmd(dev, 0x000925, 0x00000002);
+ nv_icmd(dev, 0x000926, 0x00000002);
+ nv_icmd(dev, 0x000927, 0x00000002);
+ nv_icmd(dev, 0x000928, 0x00000001);
+ nv_icmd(dev, 0x000929, 0x00000001);
+ nv_icmd(dev, 0x00092a, 0x00000001);
+ nv_icmd(dev, 0x00092b, 0x00000001);
+ nv_icmd(dev, 0x00092c, 0x00000001);
+ nv_icmd(dev, 0x00092d, 0x00000001);
+ nv_icmd(dev, 0x00092e, 0x00000001);
+ nv_icmd(dev, 0x00092f, 0x00000001);
+ nv_icmd(dev, 0x000648, 0x00000001);
+ nv_icmd(dev, 0x000649, 0x00000001);
+ nv_icmd(dev, 0x00064a, 0x00000001);
+ nv_icmd(dev, 0x00064b, 0x00000001);
+ nv_icmd(dev, 0x00064c, 0x00000001);
+ nv_icmd(dev, 0x00064d, 0x00000001);
+ nv_icmd(dev, 0x00064e, 0x00000001);
+ nv_icmd(dev, 0x00064f, 0x00000001);
+ nv_icmd(dev, 0x000650, 0x00000001);
+ nv_icmd(dev, 0x000658, 0x0000000f);
+ nv_icmd(dev, 0x0007ff, 0x0000000a);
+ nv_icmd(dev, 0x00066a, 0x40000000);
+ nv_icmd(dev, 0x00066b, 0x10000000);
+ nv_icmd(dev, 0x00066c, 0xffff0000);
+ nv_icmd(dev, 0x00066d, 0xffff0000);
+ nv_icmd(dev, 0x0007af, 0x00000008);
+ nv_icmd(dev, 0x0007b0, 0x00000008);
+ nv_icmd(dev, 0x0007f6, 0x00000001);
+ nv_icmd(dev, 0x0006b2, 0x00000055);
+ nv_icmd(dev, 0x0007ad, 0x00000003);
+ nv_icmd(dev, 0x000937, 0x00000001);
+ nv_icmd(dev, 0x000971, 0x00000008);
+ nv_icmd(dev, 0x000972, 0x00000040);
+ nv_icmd(dev, 0x000973, 0x0000012c);
+ nv_icmd(dev, 0x00097c, 0x00000040);
+ nv_icmd(dev, 0x000979, 0x00000003);
+ nv_icmd(dev, 0x000975, 0x00000020);
+ nv_icmd(dev, 0x000976, 0x00000001);
+ nv_icmd(dev, 0x000977, 0x00000020);
+ nv_icmd(dev, 0x000978, 0x00000001);
+ nv_icmd(dev, 0x000957, 0x00000003);
+ nv_icmd(dev, 0x00095e, 0x20164010);
+ nv_icmd(dev, 0x00095f, 0x00000020);
+ nv_icmd(dev, 0x00097d, 0x00000020);
+ nv_icmd(dev, 0x000683, 0x00000006);
+ nv_icmd(dev, 0x000685, 0x003fffff);
+ nv_icmd(dev, 0x000687, 0x003fffff);
+ nv_icmd(dev, 0x0006a0, 0x00000005);
+ nv_icmd(dev, 0x000840, 0x00400008);
+ nv_icmd(dev, 0x000841, 0x08000080);
+ nv_icmd(dev, 0x000842, 0x00400008);
+ nv_icmd(dev, 0x000843, 0x08000080);
+ nv_icmd(dev, 0x000818, 0x00000000);
+ nv_icmd(dev, 0x000819, 0x00000000);
+ nv_icmd(dev, 0x00081a, 0x00000000);
+ nv_icmd(dev, 0x00081b, 0x00000000);
+ nv_icmd(dev, 0x00081c, 0x00000000);
+ nv_icmd(dev, 0x00081d, 0x00000000);
+ nv_icmd(dev, 0x00081e, 0x00000000);
+ nv_icmd(dev, 0x00081f, 0x00000000);
+ nv_icmd(dev, 0x000848, 0x00000000);
+ nv_icmd(dev, 0x000849, 0x00000000);
+ nv_icmd(dev, 0x00084a, 0x00000000);
+ nv_icmd(dev, 0x00084b, 0x00000000);
+ nv_icmd(dev, 0x00084c, 0x00000000);
+ nv_icmd(dev, 0x00084d, 0x00000000);
+ nv_icmd(dev, 0x00084e, 0x00000000);
+ nv_icmd(dev, 0x00084f, 0x00000000);
+ nv_icmd(dev, 0x000850, 0x00000000);
+ nv_icmd(dev, 0x000851, 0x00000000);
+ nv_icmd(dev, 0x000852, 0x00000000);
+ nv_icmd(dev, 0x000853, 0x00000000);
+ nv_icmd(dev, 0x000854, 0x00000000);
+ nv_icmd(dev, 0x000855, 0x00000000);
+ nv_icmd(dev, 0x000856, 0x00000000);
+ nv_icmd(dev, 0x000857, 0x00000000);
+ nv_icmd(dev, 0x000738, 0x00000000);
+ nv_icmd(dev, 0x0006aa, 0x00000001);
+ nv_icmd(dev, 0x0006ab, 0x00000002);
+ nv_icmd(dev, 0x0006ac, 0x00000080);
+ nv_icmd(dev, 0x0006ad, 0x00000100);
+ nv_icmd(dev, 0x0006ae, 0x00000100);
+ nv_icmd(dev, 0x0006b1, 0x00000011);
+ nv_icmd(dev, 0x0006bb, 0x000000cf);
+ nv_icmd(dev, 0x0006ce, 0x2a712488);
+ nv_icmd(dev, 0x000739, 0x4085c000);
+ nv_icmd(dev, 0x00073a, 0x00000080);
+ nv_icmd(dev, 0x000786, 0x80000100);
+ nv_icmd(dev, 0x00073c, 0x00010100);
+ nv_icmd(dev, 0x00073d, 0x02800000);
+ nv_icmd(dev, 0x000787, 0x000000cf);
+ nv_icmd(dev, 0x00078c, 0x00000008);
+ nv_icmd(dev, 0x000792, 0x00000001);
+ nv_icmd(dev, 0x000794, 0x00000001);
+ nv_icmd(dev, 0x000795, 0x00000001);
+ nv_icmd(dev, 0x000796, 0x00000001);
+ nv_icmd(dev, 0x000797, 0x000000cf);
+ nv_icmd(dev, 0x000836, 0x00000001);
+ nv_icmd(dev, 0x00079a, 0x00000002);
+ nv_icmd(dev, 0x000833, 0x04444480);
+ nv_icmd(dev, 0x0007a1, 0x00000001);
+ nv_icmd(dev, 0x0007a3, 0x00000001);
+ nv_icmd(dev, 0x0007a4, 0x00000001);
+ nv_icmd(dev, 0x0007a5, 0x00000001);
+ nv_icmd(dev, 0x000831, 0x00000004);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x000a04, 0x000000ff);
+ nv_icmd(dev, 0x000a0b, 0x00000040);
+ nv_icmd(dev, 0x00097f, 0x00000100);
+ nv_icmd(dev, 0x000a02, 0x00000001);
+ nv_icmd(dev, 0x000809, 0x00000007);
+ nv_icmd(dev, 0x00c221, 0x00000040);
+ nv_icmd(dev, 0x00c1b0, 0x0000000f);
+ nv_icmd(dev, 0x00c1b1, 0x0000000f);
+ nv_icmd(dev, 0x00c1b2, 0x0000000f);
+ nv_icmd(dev, 0x00c1b3, 0x0000000f);
+ nv_icmd(dev, 0x00c1b4, 0x0000000f);
+ nv_icmd(dev, 0x00c1b5, 0x0000000f);
+ nv_icmd(dev, 0x00c1b6, 0x0000000f);
+ nv_icmd(dev, 0x00c1b7, 0x0000000f);
+ nv_icmd(dev, 0x00c1b8, 0x0fac6881);
+ nv_icmd(dev, 0x00c1b9, 0x00fac688);
+ nv_icmd(dev, 0x00c401, 0x00000001);
+ nv_icmd(dev, 0x00c402, 0x00010001);
+ nv_icmd(dev, 0x00c403, 0x00000001);
+ nv_icmd(dev, 0x00c404, 0x00000001);
+ nv_icmd(dev, 0x00c40e, 0x00000020);
+ nv_icmd(dev, 0x00c500, 0x00000003);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000002);
+ nv_icmd(dev, 0x0006aa, 0x00000001);
+ nv_icmd(dev, 0x0006ad, 0x00000100);
+ nv_icmd(dev, 0x0006ae, 0x00000100);
+ nv_icmd(dev, 0x0006b1, 0x00000011);
+ nv_icmd(dev, 0x00078c, 0x00000008);
+ nv_icmd(dev, 0x000792, 0x00000001);
+ nv_icmd(dev, 0x000794, 0x00000001);
+ nv_icmd(dev, 0x000795, 0x00000001);
+ nv_icmd(dev, 0x000796, 0x00000001);
+ nv_icmd(dev, 0x000797, 0x000000cf);
+ nv_icmd(dev, 0x00079a, 0x00000002);
+ nv_icmd(dev, 0x000833, 0x04444480);
+ nv_icmd(dev, 0x0007a1, 0x00000001);
+ nv_icmd(dev, 0x0007a3, 0x00000001);
+ nv_icmd(dev, 0x0007a4, 0x00000001);
+ nv_icmd(dev, 0x0007a5, 0x00000001);
+ nv_icmd(dev, 0x000831, 0x00000004);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000008);
+ nv_icmd(dev, 0x000039, 0x00000000);
+ nv_icmd(dev, 0x00003a, 0x00000000);
+ nv_icmd(dev, 0x00003b, 0x00000000);
+ nv_icmd(dev, 0x000380, 0x00000001);
+ nv_icmd(dev, 0x000366, 0x00000000);
+ nv_icmd(dev, 0x000367, 0x00000000);
+ nv_icmd(dev, 0x000368, 0x00000fff);
+ nv_icmd(dev, 0x000370, 0x00000000);
+ nv_icmd(dev, 0x000371, 0x00000000);
+ nv_icmd(dev, 0x000372, 0x000fffff);
+ nv_icmd(dev, 0x000813, 0x00000006);
+ nv_icmd(dev, 0x000814, 0x00000008);
+ nv_icmd(dev, 0x000957, 0x00000003);
+ nv_icmd(dev, 0x000818, 0x00000000);
+ nv_icmd(dev, 0x000819, 0x00000000);
+ nv_icmd(dev, 0x00081a, 0x00000000);
+ nv_icmd(dev, 0x00081b, 0x00000000);
+ nv_icmd(dev, 0x00081c, 0x00000000);
+ nv_icmd(dev, 0x00081d, 0x00000000);
+ nv_icmd(dev, 0x00081e, 0x00000000);
+ nv_icmd(dev, 0x00081f, 0x00000000);
+ nv_icmd(dev, 0x000848, 0x00000000);
+ nv_icmd(dev, 0x000849, 0x00000000);
+ nv_icmd(dev, 0x00084a, 0x00000000);
+ nv_icmd(dev, 0x00084b, 0x00000000);
+ nv_icmd(dev, 0x00084c, 0x00000000);
+ nv_icmd(dev, 0x00084d, 0x00000000);
+ nv_icmd(dev, 0x00084e, 0x00000000);
+ nv_icmd(dev, 0x00084f, 0x00000000);
+ nv_icmd(dev, 0x000850, 0x00000000);
+ nv_icmd(dev, 0x000851, 0x00000000);
+ nv_icmd(dev, 0x000852, 0x00000000);
+ nv_icmd(dev, 0x000853, 0x00000000);
+ nv_icmd(dev, 0x000854, 0x00000000);
+ nv_icmd(dev, 0x000855, 0x00000000);
+ nv_icmd(dev, 0x000856, 0x00000000);
+ nv_icmd(dev, 0x000857, 0x00000000);
+ nv_icmd(dev, 0x000738, 0x00000000);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x000a04, 0x000000ff);
+ nv_icmd(dev, 0x00097f, 0x00000100);
+ nv_icmd(dev, 0x000a02, 0x00000001);
+ nv_icmd(dev, 0x000809, 0x00000007);
+ nv_icmd(dev, 0x00c221, 0x00000040);
+ nv_icmd(dev, 0x00c401, 0x00000001);
+ nv_icmd(dev, 0x00c402, 0x00010001);
+ nv_icmd(dev, 0x00c403, 0x00000001);
+ nv_icmd(dev, 0x00c404, 0x00000001);
+ nv_icmd(dev, 0x00c40e, 0x00000020);
+ nv_icmd(dev, 0x00c500, 0x00000003);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ nv_icmd(dev, 0x001000, 0x00000001);
+ nv_icmd(dev, 0x000b07, 0x00000002);
+ nv_icmd(dev, 0x000b08, 0x00000100);
+ nv_icmd(dev, 0x000b09, 0x00000100);
+ nv_icmd(dev, 0x000b0a, 0x00000001);
+ nv_icmd(dev, 0x01e100, 0x00000001);
+ /* presumably re-locks the ICMD access window at 0x400208 that was opened before this init sequence */
+ nv_wr32(dev, 0x400208, 0x00000000);
+}
+
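+/*
+ * Annotation (not in the original patch): nv_mthd() appears to submit a
+ * single graphics method via PGRAPH's method-injection interface. The data
+ * word is staged at 0x40448c, then 0x404488 latches the class/method pair
+ * with the trigger bit (0x80000000) set; the exact register semantics are
+ * an assumption based on how the helper is used below.
+ */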
+static void
+nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
+{
+ nv_wr32(dev, 0x40448c, data);
+ nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
+}
+
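+/*
+ * Annotation (not in the original patch): this routine replays the default
+ * ("golden") value for each method of object class 0xa097, assumed here to
+ * be the NVE0/Kepler 3D class, so newly created channels start from a sane
+ * initial method state.
+ */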
+static void
+nve0_grctx_generate_a097(struct drm_device *dev)
+{
+ nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
+ nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
+ nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
+ nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
+ nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
+ nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
+ nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
+ nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
+ nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
+ nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
+ nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
+ nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
+ nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
+ nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
+ nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
+ nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
+ nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
+ nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
+ nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
+ nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
+ nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
+ nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
+ nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
+ nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
+ nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
+ nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
+ nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
+ nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
+ nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
+ nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
+ nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
+ nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
+ nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
+ nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
+ nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
+ nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
+ nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
+ nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
+ nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
+ nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
+ nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
+ nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
+ nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
+ nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
+ nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
+ nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
+ nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
+ nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
+ nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
+ nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
+ nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
+ nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
+ nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
+ nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
+ nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
+ nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
+ nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
+ nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
+ nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
+ nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
+ nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
+ nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
+ nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
+ nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
+ nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
+ nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
+ nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
+ nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
+ nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
+ nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
+ nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
+}
+
+static void
+nve0_grctx_generate_902d(struct drm_device *dev)
+{
+ nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
+ nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
+ nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
+ nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
+ nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
+ nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
+ nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
+}
+
+static void
+nve0_graph_generate_unk40xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404010, 0x0);
+ nv_wr32(dev, 0x404014, 0x0);
+ nv_wr32(dev, 0x404018, 0x0);
+ nv_wr32(dev, 0x40401c, 0x0);
+ nv_wr32(dev, 0x404020, 0x0);
+ nv_wr32(dev, 0x404024, 0xe000);
+ nv_wr32(dev, 0x404028, 0x0);
+ nv_wr32(dev, 0x4040a8, 0x0);
+ nv_wr32(dev, 0x4040ac, 0x0);
+ nv_wr32(dev, 0x4040b0, 0x0);
+ nv_wr32(dev, 0x4040b4, 0x0);
+ nv_wr32(dev, 0x4040b8, 0x0);
+ nv_wr32(dev, 0x4040bc, 0x0);
+ nv_wr32(dev, 0x4040c0, 0x0);
+ nv_wr32(dev, 0x4040c4, 0x0);
+ nv_wr32(dev, 0x4040c8, 0xf800008f);
+ nv_wr32(dev, 0x4040d0, 0x0);
+ nv_wr32(dev, 0x4040d4, 0x0);
+ nv_wr32(dev, 0x4040d8, 0x0);
+ nv_wr32(dev, 0x4040dc, 0x0);
+ nv_wr32(dev, 0x4040e0, 0x0);
+ nv_wr32(dev, 0x4040e4, 0x0);
+ nv_wr32(dev, 0x4040e8, 0x1000);
+ nv_wr32(dev, 0x4040f8, 0x0);
+ nv_wr32(dev, 0x404130, 0x0);
+ nv_wr32(dev, 0x404134, 0x0);
+ nv_wr32(dev, 0x404138, 0x20000040);
+ nv_wr32(dev, 0x404150, 0x2e);
+ nv_wr32(dev, 0x404154, 0x400);
+ nv_wr32(dev, 0x404158, 0x200);
+ nv_wr32(dev, 0x404164, 0x55);
+ nv_wr32(dev, 0x4041a0, 0x0);
+ nv_wr32(dev, 0x4041a4, 0x0);
+ nv_wr32(dev, 0x4041a8, 0x0);
+ nv_wr32(dev, 0x4041ac, 0x0);
+ nv_wr32(dev, 0x404200, 0x0);
+ nv_wr32(dev, 0x404204, 0x0);
+ nv_wr32(dev, 0x404208, 0x0);
+ nv_wr32(dev, 0x40420c, 0x0);
+}
+
+static void
+nve0_graph_generate_unk44xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404404, 0x0);
+ nv_wr32(dev, 0x404408, 0x0);
+ nv_wr32(dev, 0x40440c, 0x0);
+ nv_wr32(dev, 0x404410, 0x0);
+ nv_wr32(dev, 0x404414, 0x0);
+ nv_wr32(dev, 0x404418, 0x0);
+ nv_wr32(dev, 0x40441c, 0x0);
+ nv_wr32(dev, 0x404420, 0x0);
+ nv_wr32(dev, 0x404424, 0x0);
+ nv_wr32(dev, 0x404428, 0x0);
+ nv_wr32(dev, 0x40442c, 0x0);
+ nv_wr32(dev, 0x404430, 0x0);
+ nv_wr32(dev, 0x404434, 0x0);
+ nv_wr32(dev, 0x404438, 0x0);
+ nv_wr32(dev, 0x404460, 0x0);
+ nv_wr32(dev, 0x404464, 0x0);
+ nv_wr32(dev, 0x404468, 0xffffff);
+ nv_wr32(dev, 0x40446c, 0x0);
+ nv_wr32(dev, 0x404480, 0x1);
+ nv_wr32(dev, 0x404498, 0x1);
+}
+
+static void
+nve0_graph_generate_unk46xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404604, 0x14);
+ nv_wr32(dev, 0x404608, 0x0);
+ nv_wr32(dev, 0x40460c, 0x3fff);
+ nv_wr32(dev, 0x404610, 0x100);
+ nv_wr32(dev, 0x404618, 0x0);
+ nv_wr32(dev, 0x40461c, 0x0);
+ nv_wr32(dev, 0x404620, 0x0);
+ nv_wr32(dev, 0x404624, 0x0);
+ nv_wr32(dev, 0x40462c, 0x0);
+ nv_wr32(dev, 0x404630, 0x0);
+ nv_wr32(dev, 0x404640, 0x0);
+ nv_wr32(dev, 0x404654, 0x0);
+ nv_wr32(dev, 0x404660, 0x0);
+ nv_wr32(dev, 0x404678, 0x0);
+ nv_wr32(dev, 0x40467c, 0x2);
+ nv_wr32(dev, 0x404680, 0x0);
+ nv_wr32(dev, 0x404684, 0x0);
+ nv_wr32(dev, 0x404688, 0x0);
+ nv_wr32(dev, 0x40468c, 0x0);
+ nv_wr32(dev, 0x404690, 0x0);
+ nv_wr32(dev, 0x404694, 0x0);
+ nv_wr32(dev, 0x404698, 0x0);
+ nv_wr32(dev, 0x40469c, 0x0);
+ nv_wr32(dev, 0x4046a0, 0x7f0080);
+ nv_wr32(dev, 0x4046a4, 0x0);
+ nv_wr32(dev, 0x4046a8, 0x0);
+ nv_wr32(dev, 0x4046ac, 0x0);
+ nv_wr32(dev, 0x4046b0, 0x0);
+ nv_wr32(dev, 0x4046b4, 0x0);
+ nv_wr32(dev, 0x4046b8, 0x0);
+ nv_wr32(dev, 0x4046bc, 0x0);
+ nv_wr32(dev, 0x4046c0, 0x0);
+ nv_wr32(dev, 0x4046c8, 0x0);
+ nv_wr32(dev, 0x4046cc, 0x0);
+ nv_wr32(dev, 0x4046d0, 0x0);
+}
+
+static void
+nve0_graph_generate_unk47xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x404700, 0x0);
+ nv_wr32(dev, 0x404704, 0x0);
+ nv_wr32(dev, 0x404708, 0x0);
+ nv_wr32(dev, 0x404718, 0x0);
+ nv_wr32(dev, 0x40471c, 0x0);
+ nv_wr32(dev, 0x404720, 0x0);
+ nv_wr32(dev, 0x404724, 0x0);
+ nv_wr32(dev, 0x404728, 0x0);
+ nv_wr32(dev, 0x40472c, 0x0);
+ nv_wr32(dev, 0x404730, 0x0);
+ nv_wr32(dev, 0x404734, 0x100);
+ nv_wr32(dev, 0x404738, 0x0);
+ nv_wr32(dev, 0x40473c, 0x0);
+ nv_wr32(dev, 0x404744, 0x0);
+ nv_wr32(dev, 0x404748, 0x0);
+ nv_wr32(dev, 0x404754, 0x0);
+}
+
+static void
+nve0_graph_generate_unk58xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x405800, 0xf8000bf);
+ nv_wr32(dev, 0x405830, 0x2180648);
+ nv_wr32(dev, 0x405834, 0x8000000);
+ nv_wr32(dev, 0x405838, 0x0);
+ nv_wr32(dev, 0x405854, 0x0);
+ nv_wr32(dev, 0x405870, 0x1);
+ nv_wr32(dev, 0x405874, 0x1);
+ nv_wr32(dev, 0x405878, 0x1);
+ nv_wr32(dev, 0x40587c, 0x1);
+ nv_wr32(dev, 0x405a00, 0x0);
+ nv_wr32(dev, 0x405a04, 0x0);
+ nv_wr32(dev, 0x405a18, 0x0);
+ nv_wr32(dev, 0x405b00, 0x0);
+ nv_wr32(dev, 0x405b10, 0x1000);
+}
+
+static void
+nve0_graph_generate_unk60xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x406020, 0x4103c1);
+ nv_wr32(dev, 0x406028, 0x1);
+ nv_wr32(dev, 0x40602c, 0x1);
+ nv_wr32(dev, 0x406030, 0x1);
+ nv_wr32(dev, 0x406034, 0x1);
+}
+
+static void
+nve0_graph_generate_unk64xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x4064a8, 0x0);
+ nv_wr32(dev, 0x4064ac, 0x3fff);
+ nv_wr32(dev, 0x4064b4, 0x0);
+ nv_wr32(dev, 0x4064b8, 0x0);
+ nv_wr32(dev, 0x4064c0, 0x801a00f0);
+ nv_wr32(dev, 0x4064c4, 0x192ffff);
+ nv_wr32(dev, 0x4064c8, 0x1800600);
+ nv_wr32(dev, 0x4064cc, 0x0);
+ nv_wr32(dev, 0x4064d0, 0x0);
+ nv_wr32(dev, 0x4064d4, 0x0);
+ nv_wr32(dev, 0x4064d8, 0x0);
+ nv_wr32(dev, 0x4064dc, 0x0);
+ nv_wr32(dev, 0x4064e0, 0x0);
+ nv_wr32(dev, 0x4064e4, 0x0);
+ nv_wr32(dev, 0x4064e8, 0x0);
+ nv_wr32(dev, 0x4064ec, 0x0);
+ nv_wr32(dev, 0x4064fc, 0x22a);
+}
+
+static void
+nve0_graph_generate_unk70xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407040, 0x0);
+}
+
+static void
+nve0_graph_generate_unk78xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x407804, 0x23);
+ nv_wr32(dev, 0x40780c, 0xa418820);
+ nv_wr32(dev, 0x407810, 0x62080e6);
+ nv_wr32(dev, 0x407814, 0x20398a4);
+ nv_wr32(dev, 0x407818, 0xe629062);
+ nv_wr32(dev, 0x40781c, 0xa418820);
+ nv_wr32(dev, 0x407820, 0xe6);
+ nv_wr32(dev, 0x4078bc, 0x103);
+}
+
+static void
+nve0_graph_generate_unk80xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408000, 0x0);
+ nv_wr32(dev, 0x408004, 0x0);
+ nv_wr32(dev, 0x408008, 0x30);
+ nv_wr32(dev, 0x40800c, 0x0);
+ nv_wr32(dev, 0x408010, 0x0);
+ nv_wr32(dev, 0x408014, 0x69);
+ nv_wr32(dev, 0x408018, 0xe100e100);
+ nv_wr32(dev, 0x408064, 0x0);
+}
+
+static void
+nve0_graph_generate_unk88xx(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x408800, 0x2802a3c);
+ nv_wr32(dev, 0x408804, 0x40);
+ nv_wr32(dev, 0x408808, 0x1043e005);
+ nv_wr32(dev, 0x408840, 0xb);
+ nv_wr32(dev, 0x408900, 0x3080b801);
+ nv_wr32(dev, 0x408904, 0x62000001);
+ nv_wr32(dev, 0x408908, 0xc8102f);
+ nv_wr32(dev, 0x408980, 0x11d);
+}
+
+static void
+nve0_graph_generate_gpc(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x418380, 0x16);
+ nv_wr32(dev, 0x418400, 0x38004e00);
+ nv_wr32(dev, 0x418404, 0x71e0ffff);
+ nv_wr32(dev, 0x41840c, 0x1008);
+ nv_wr32(dev, 0x418410, 0xfff0fff);
+ nv_wr32(dev, 0x418414, 0x2200fff);
+ nv_wr32(dev, 0x418450, 0x0);
+ nv_wr32(dev, 0x418454, 0x0);
+ nv_wr32(dev, 0x418458, 0x0);
+ nv_wr32(dev, 0x41845c, 0x0);
+ nv_wr32(dev, 0x418460, 0x0);
+ nv_wr32(dev, 0x418464, 0x0);
+ nv_wr32(dev, 0x418468, 0x1);
+ nv_wr32(dev, 0x41846c, 0x0);
+ nv_wr32(dev, 0x418470, 0x0);
+ nv_wr32(dev, 0x418600, 0x1f);
+ nv_wr32(dev, 0x418684, 0xf);
+ nv_wr32(dev, 0x418700, 0x2);
+ nv_wr32(dev, 0x418704, 0x80);
+ nv_wr32(dev, 0x418708, 0x0);
+ nv_wr32(dev, 0x41870c, 0x0);
+ nv_wr32(dev, 0x418710, 0x0);
+ nv_wr32(dev, 0x418800, 0x7006860a);
+ nv_wr32(dev, 0x418808, 0x0);
+ nv_wr32(dev, 0x41880c, 0x0);
+ nv_wr32(dev, 0x418810, 0x0);
+ nv_wr32(dev, 0x418828, 0x44);
+ nv_wr32(dev, 0x418830, 0x10000001);
+ nv_wr32(dev, 0x4188d8, 0x8);
+ nv_wr32(dev, 0x4188e0, 0x1000000);
+ nv_wr32(dev, 0x4188e8, 0x0);
+ nv_wr32(dev, 0x4188ec, 0x0);
+ nv_wr32(dev, 0x4188f0, 0x0);
+ nv_wr32(dev, 0x4188f4, 0x0);
+ nv_wr32(dev, 0x4188f8, 0x0);
+ nv_wr32(dev, 0x4188fc, 0x20100018);
+ nv_wr32(dev, 0x41891c, 0xff00ff);
+ nv_wr32(dev, 0x418924, 0x0);
+ nv_wr32(dev, 0x418928, 0xffff00);
+ nv_wr32(dev, 0x41892c, 0xff00);
+ nv_wr32(dev, 0x418a00, 0x0);
+ nv_wr32(dev, 0x418a04, 0x0);
+ nv_wr32(dev, 0x418a08, 0x0);
+ nv_wr32(dev, 0x418a0c, 0x10000);
+ nv_wr32(dev, 0x418a10, 0x0);
+ nv_wr32(dev, 0x418a14, 0x0);
+ nv_wr32(dev, 0x418a18, 0x0);
+ nv_wr32(dev, 0x418a20, 0x0);
+ nv_wr32(dev, 0x418a24, 0x0);
+ nv_wr32(dev, 0x418a28, 0x0);
+ nv_wr32(dev, 0x418a2c, 0x10000);
+ nv_wr32(dev, 0x418a30, 0x0);
+ nv_wr32(dev, 0x418a34, 0x0);
+ nv_wr32(dev, 0x418a38, 0x0);
+ nv_wr32(dev, 0x418a40, 0x0);
+ nv_wr32(dev, 0x418a44, 0x0);
+ nv_wr32(dev, 0x418a48, 0x0);
+ nv_wr32(dev, 0x418a4c, 0x10000);
+ nv_wr32(dev, 0x418a50, 0x0);
+ nv_wr32(dev, 0x418a54, 0x0);
+ nv_wr32(dev, 0x418a58, 0x0);
+ nv_wr32(dev, 0x418a60, 0x0);
+ nv_wr32(dev, 0x418a64, 0x0);
+ nv_wr32(dev, 0x418a68, 0x0);
+ nv_wr32(dev, 0x418a6c, 0x10000);
+ nv_wr32(dev, 0x418a70, 0x0);
+ nv_wr32(dev, 0x418a74, 0x0);
+ nv_wr32(dev, 0x418a78, 0x0);
+ nv_wr32(dev, 0x418a80, 0x0);
+ nv_wr32(dev, 0x418a84, 0x0);
+ nv_wr32(dev, 0x418a88, 0x0);
+ nv_wr32(dev, 0x418a8c, 0x10000);
+ nv_wr32(dev, 0x418a90, 0x0);
+ nv_wr32(dev, 0x418a94, 0x0);
+ nv_wr32(dev, 0x418a98, 0x0);
+ nv_wr32(dev, 0x418aa0, 0x0);
+ nv_wr32(dev, 0x418aa4, 0x0);
+ nv_wr32(dev, 0x418aa8, 0x0);
+ nv_wr32(dev, 0x418aac, 0x10000);
+ nv_wr32(dev, 0x418ab0, 0x0);
+ nv_wr32(dev, 0x418ab4, 0x0);
+ nv_wr32(dev, 0x418ab8, 0x0);
+ nv_wr32(dev, 0x418ac0, 0x0);
+ nv_wr32(dev, 0x418ac4, 0x0);
+ nv_wr32(dev, 0x418ac8, 0x0);
+ nv_wr32(dev, 0x418acc, 0x10000);
+ nv_wr32(dev, 0x418ad0, 0x0);
+ nv_wr32(dev, 0x418ad4, 0x0);
+ nv_wr32(dev, 0x418ad8, 0x0);
+ nv_wr32(dev, 0x418ae0, 0x0);
+ nv_wr32(dev, 0x418ae4, 0x0);
+ nv_wr32(dev, 0x418ae8, 0x0);
+ nv_wr32(dev, 0x418aec, 0x10000);
+ nv_wr32(dev, 0x418af0, 0x0);
+ nv_wr32(dev, 0x418af4, 0x0);
+ nv_wr32(dev, 0x418af8, 0x0);
+ nv_wr32(dev, 0x418b00, 0x6);
+ nv_wr32(dev, 0x418b08, 0xa418820);
+ nv_wr32(dev, 0x418b0c, 0x62080e6);
+ nv_wr32(dev, 0x418b10, 0x20398a4);
+ nv_wr32(dev, 0x418b14, 0xe629062);
+ nv_wr32(dev, 0x418b18, 0xa418820);
+ nv_wr32(dev, 0x418b1c, 0xe6);
+ nv_wr32(dev, 0x418bb8, 0x103);
+ nv_wr32(dev, 0x418c08, 0x1);
+ nv_wr32(dev, 0x418c10, 0x0);
+ nv_wr32(dev, 0x418c14, 0x0);
+ nv_wr32(dev, 0x418c18, 0x0);
+ nv_wr32(dev, 0x418c1c, 0x0);
+ nv_wr32(dev, 0x418c20, 0x0);
+ nv_wr32(dev, 0x418c24, 0x0);
+ nv_wr32(dev, 0x418c28, 0x0);
+ nv_wr32(dev, 0x418c2c, 0x0);
+ nv_wr32(dev, 0x418c40, 0xffffffff);
+ nv_wr32(dev, 0x418c6c, 0x1);
+ nv_wr32(dev, 0x418c80, 0x20200004);
+ nv_wr32(dev, 0x418c8c, 0x1);
+ nv_wr32(dev, 0x419000, 0x780);
+ nv_wr32(dev, 0x419004, 0x0);
+ nv_wr32(dev, 0x419008, 0x0);
+ nv_wr32(dev, 0x419014, 0x4);
+}
+
+static void
+nve0_graph_generate_tpc(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x419848, 0x0);
+ nv_wr32(dev, 0x419864, 0x129);
+ nv_wr32(dev, 0x419888, 0x0);
+ nv_wr32(dev, 0x419a00, 0xf0);
+ nv_wr32(dev, 0x419a04, 0x1);
+ nv_wr32(dev, 0x419a08, 0x21);
+ nv_wr32(dev, 0x419a0c, 0x20000);
+ nv_wr32(dev, 0x419a10, 0x0);
+ nv_wr32(dev, 0x419a14, 0x200);
+ nv_wr32(dev, 0x419a1c, 0xc000);
+ nv_wr32(dev, 0x419a20, 0x800);
+ nv_wr32(dev, 0x419a30, 0x1);
+ nv_wr32(dev, 0x419ac4, 0x37f440);
+ nv_wr32(dev, 0x419c00, 0xa);
+ nv_wr32(dev, 0x419c04, 0x80000006);
+ nv_wr32(dev, 0x419c08, 0x2);
+ nv_wr32(dev, 0x419c20, 0x0);
+ nv_wr32(dev, 0x419c24, 0x84210);
+ nv_wr32(dev, 0x419c28, 0x3efbefbe);
+ nv_wr32(dev, 0x419ce8, 0x0);
+ nv_wr32(dev, 0x419cf4, 0x3203);
+ nv_wr32(dev, 0x419e04, 0x0);
+ nv_wr32(dev, 0x419e08, 0x0);
+ nv_wr32(dev, 0x419e0c, 0x0);
+ nv_wr32(dev, 0x419e10, 0x402);
+ nv_wr32(dev, 0x419e44, 0x13eff2);
+ nv_wr32(dev, 0x419e48, 0x0);
+ nv_wr32(dev, 0x419e4c, 0x7f);
+ nv_wr32(dev, 0x419e50, 0x0);
+ nv_wr32(dev, 0x419e54, 0x0);
+ nv_wr32(dev, 0x419e58, 0x0);
+ nv_wr32(dev, 0x419e5c, 0x0);
+ nv_wr32(dev, 0x419e60, 0x0);
+ nv_wr32(dev, 0x419e64, 0x0);
+ nv_wr32(dev, 0x419e68, 0x0);
+ nv_wr32(dev, 0x419e6c, 0x0);
+ nv_wr32(dev, 0x419e70, 0x0);
+ nv_wr32(dev, 0x419e74, 0x0);
+ nv_wr32(dev, 0x419e78, 0x0);
+ nv_wr32(dev, 0x419e7c, 0x0);
+ nv_wr32(dev, 0x419e80, 0x0);
+ nv_wr32(dev, 0x419e84, 0x0);
+ nv_wr32(dev, 0x419e88, 0x0);
+ nv_wr32(dev, 0x419e8c, 0x0);
+ nv_wr32(dev, 0x419e90, 0x0);
+ nv_wr32(dev, 0x419e94, 0x0);
+ nv_wr32(dev, 0x419e98, 0x0);
+ nv_wr32(dev, 0x419eac, 0x1fcf);
+ nv_wr32(dev, 0x419eb0, 0xd3f);
+ nv_wr32(dev, 0x419ec8, 0x1304f);
+ nv_wr32(dev, 0x419f30, 0x0);
+ nv_wr32(dev, 0x419f34, 0x0);
+ nv_wr32(dev, 0x419f38, 0x0);
+ nv_wr32(dev, 0x419f3c, 0x0);
+ nv_wr32(dev, 0x419f40, 0x0);
+ nv_wr32(dev, 0x419f44, 0x0);
+ nv_wr32(dev, 0x419f48, 0x0);
+ nv_wr32(dev, 0x419f4c, 0x0);
+ nv_wr32(dev, 0x419f58, 0x0);
+ nv_wr32(dev, 0x419f78, 0xb);
+}
+
+static void
+nve0_graph_generate_tpcunk(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x41be24, 0x6);
+ nv_wr32(dev, 0x41bec0, 0x12180000);
+ nv_wr32(dev, 0x41bec4, 0x37f7f);
+ nv_wr32(dev, 0x41bee4, 0x6480430);
+ nv_wr32(dev, 0x41bf00, 0xa418820);
+ nv_wr32(dev, 0x41bf04, 0x62080e6);
+ nv_wr32(dev, 0x41bf08, 0x20398a4);
+ nv_wr32(dev, 0x41bf0c, 0xe629062);
+ nv_wr32(dev, 0x41bf10, 0xa418820);
+ nv_wr32(dev, 0x41bf14, 0xe6);
+ nv_wr32(dev, 0x41bfd0, 0x900103);
+ nv_wr32(dev, 0x41bfe0, 0x400001);
+ nv_wr32(dev, 0x41bfe4, 0x0);
+}
+
+int
+nve0_grctx_generate(struct nouveau_channel *chan)
+{
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ u32 data[6] = {}, data2[2] = {}, tmp;
+ u32 tpc_set = 0, tpc_mask = 0;
+ u8 tpcnr[GPC_MAX], a, b;
+ u8 shift, ntpcv;
+ int i, gpc, tpc, id;
+
+ nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x400204, 0x00000000);
+ nv_wr32(dev, 0x400208, 0x00000000);
+
+ nve0_graph_generate_unk40xx(dev);
+ nve0_graph_generate_unk44xx(dev);
+ nve0_graph_generate_unk46xx(dev);
+ nve0_graph_generate_unk47xx(dev);
+ nve0_graph_generate_unk58xx(dev);
+ nve0_graph_generate_unk60xx(dev);
+ nve0_graph_generate_unk64xx(dev);
+ nve0_graph_generate_unk70xx(dev);
+ nve0_graph_generate_unk78xx(dev);
+ nve0_graph_generate_unk80xx(dev);
+ nve0_graph_generate_unk88xx(dev);
+ nve0_graph_generate_gpc(dev);
+ nve0_graph_generate_tpc(dev);
+ nve0_graph_generate_tpcunk(dev);
+
+ nv_wr32(dev, 0x404154, 0x0);
+
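+ /* note: grch->mmio is assumed to hold register/value pairs recorded
+  * when the channel was created; the loop below simply replays them
+  * into PGRAPH before the rest of the context image is generated
+  */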
+ for (i = 0; i < grch->mmio_nr * 8; i += 8) {
+ u32 reg = nv_ro32(grch->mmio, i + 0);
+ u32 val = nv_ro32(grch->mmio, i + 4);
+ nv_wr32(dev, reg, val);
+ }
+
+ nv_wr32(dev, 0x418c6c, 0x1);
+ nv_wr32(dev, 0x41980c, 0x10);
+ nv_wr32(dev, 0x41be08, 0x4);
+ nv_wr32(dev, 0x4064c0, 0x801a00f0);
+ nv_wr32(dev, 0x405800, 0xf8000bf);
+ nv_wr32(dev, 0x419c00, 0xa);
+
+ for (tpc = 0, id = 0; tpc < 4; tpc++) {
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ if (tpc < priv->tpc_nr[gpc]) {
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
+ }
+
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
+ }
+ }
+
+ tmp = 0;
+ for (i = 0; i < priv->gpc_nr; i++)
+ tmp |= priv->tpc_nr[i] << (i * 4);
+ nv_wr32(dev, 0x406028, tmp);
+ nv_wr32(dev, 0x405870, tmp);
+
+ nv_wr32(dev, 0x40602c, 0x0);
+ nv_wr32(dev, 0x405874, 0x0);
+ nv_wr32(dev, 0x406030, 0x0);
+ nv_wr32(dev, 0x405878, 0x0);
+ nv_wr32(dev, 0x406034, 0x0);
+ nv_wr32(dev, 0x40587c, 0x0);
+
+ /* calculate first set of magics */
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+
+ gpc = -1;
+ for (tpc = 0; tpc < priv->tpc_total; tpc++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpcnr[gpc]--;
+
+ data[tpc / 6] |= gpc << ((tpc % 6) * 5);
+ }
+
+ for (; tpc < 32; tpc++)
+ data[tpc / 6] |= 7 << ((tpc % 6) * 5);
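+ /* note (illustration, hypothetical layout): each global TPC index
+  * gets a 5-bit field holding the GPC it was assigned in round-robin
+  * order, six fields per 32-bit word; e.g. 2 GPCs of 2 TPCs each
+  * gives 0,1,0,1 in the low 20 bits of data[0], and the fields for
+  * the remaining slots up to 32 are filled with 7
+  */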
+
+ /* and the second... */
+ shift = 0;
+ ntpcv = priv->tpc_total;
+ while (!(ntpcv & (1 << 4))) {
+ ntpcv <<= 1;
+ shift++;
+ }
+
+ data2[0] = ntpcv << 16;
+ data2[0] |= shift << 21;
+ data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
+ data2[0] |= priv->tpc_total << 8;
+ data2[0] |= priv->magic_not_rop_nr;
+ for (i = 1; i < 7; i++)
+ data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
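+ /* note (illustration, hypothetical count): ntpcv is tpc_total
+  * shifted left until bit 4 is set, with 'shift' counting the steps;
+  * e.g. tpc_total == 8 gives ntpcv = 16, shift = 1, so
+  * data2[0] = (16 << 16) | (1 << 21) | ((32 % 16) << 24) |
+  *            (8 << 8) | magic_not_rop_nr
+  */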
+
+ /* and write it to all the various parts of PGRAPH */
+ nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
+
+ nv_wr32(dev, 0x41bfd0, data2[0]);
+ nv_wr32(dev, 0x41bfe4, data2[1]);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);
+
+ nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
+ for (i = 0; i < 6; i++)
+ nv_wr32(dev, 0x40780c + (i * 4), data[i]);
+
+
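+ /* note: the loop below brings TPCs into tpc_set one at a time at
+  * roughly even intervals across 32 slots; each slot's 0x406800
+  * register gets the cumulative mask (bit gpc * 8 + tpc) and
+  * 0x406c00 gets its complement within tpc_mask
+  */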
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++)
+ tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
+
+ for (i = 0, gpc = -1, b = -1; i < 32; i++) {
+ a = (i * (priv->tpc_total - 1)) / 32;
+ if (a != b) {
+ b = a;
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ tpc_set |= 1 << ((gpc * 8) + tpc);
+ }
+
+ nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
+ nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
+ }
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);
+
+ nv_wr32(dev, 0x405b00, 0x201);
+ nv_wr32(dev, 0x408850, 0x2);
+ nv_wr32(dev, 0x408958, 0x2);
+ nv_wr32(dev, 0x419f78, 0xa);
+
+ nve0_grctx_generate_icmd(dev);
+ nve0_grctx_generate_a097(dev);
+ nve0_grctx_generate_902d(dev);
+
+ nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
+ nv_wr32(dev, 0x418800, 0x7026860a); //XXX
+ nv_wr32(dev, 0x41be10, 0x00bb8bc7); //XXX
+ return 0;
+}
--- /dev/null
+/* fuc microcode for nvc0 PGRAPH/GPC
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
+ */
+
+/* TODO
+ * - bracket certain functions with scratch writes, useful for debugging
+ * - watchdog timer around ctx operations
+ */
+
+.section #nvc0_grgpc_data
+include(`nvc0_graph.fuc')
+gpc_id: .b32 0
+gpc_mmio_list_head: .b32 0
+gpc_mmio_list_tail: .b32 0
+
+tpc_count: .b32 0
+tpc_mask: .b32 0
+tpc_mmio_list_head: .b32 0
+tpc_mmio_list_tail: .b32 0
+
+cmd_queue: queue_init
+
+// chipset descriptions
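+// (note: inferred from init_find_chipset -- each entry is 12 bytes, a
+//  chipset ID byte padded to 32 bits followed by four 16-bit pointers:
+//  GPC mmio list head/tail, then TPC mmio list head/tail; the table is
+//  terminated by a zero ID)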
+chipsets:
+.b8 0xc0 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
+.b8 0xc1 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc1_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc1_tpc_mmio_tail
+.b8 0xc3 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
+.b8 0xc4 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
+.b8 0xc8 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc0_tpc_mmio_tail
+.b8 0xce 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvc3_tpc_mmio_tail
+.b8 0xcf 0 0 0
+.b16 #nvc0_gpc_mmio_head
+.b16 #nvc0_gpc_mmio_tail
+.b16 #nvc0_tpc_mmio_head
+.b16 #nvcf_tpc_mmio_tail
+.b8 0xd9 0 0 0
+.b16 #nvd9_gpc_mmio_head
+.b16 #nvd9_gpc_mmio_tail
+.b16 #nvd9_tpc_mmio_head
+.b16 #nvd9_tpc_mmio_tail
+.b8 0 0 0 0
+
+// GPC mmio lists
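+// (note: inferred from the generated data tables -- each
+//  mmctx_data(addr, count) entry describes 'count' consecutive 32-bit
+//  registers starting at 'addr', encoded as ((count - 1) * 4) << 24 | addr)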
+nvc0_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 6)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvc0_gpc_mmio_tail:
+mmctx_data(0x000c6c, 1)
+nvc1_gpc_mmio_tail:
+
+nvd9_gpc_mmio_head:
+mmctx_data(0x000380, 1)
+mmctx_data(0x000400, 2)
+mmctx_data(0x00040c, 3)
+mmctx_data(0x000450, 9)
+mmctx_data(0x000600, 1)
+mmctx_data(0x000684, 1)
+mmctx_data(0x000700, 5)
+mmctx_data(0x000800, 1)
+mmctx_data(0x000808, 3)
+mmctx_data(0x000828, 1)
+mmctx_data(0x000830, 1)
+mmctx_data(0x0008d8, 1)
+mmctx_data(0x0008e0, 1)
+mmctx_data(0x0008e8, 6)
+mmctx_data(0x00091c, 1)
+mmctx_data(0x000924, 3)
+mmctx_data(0x000b00, 1)
+mmctx_data(0x000b08, 6)
+mmctx_data(0x000bb8, 1)
+mmctx_data(0x000c08, 1)
+mmctx_data(0x000c10, 8)
+mmctx_data(0x000c6c, 1)
+mmctx_data(0x000c80, 1)
+mmctx_data(0x000c8c, 1)
+mmctx_data(0x001000, 3)
+mmctx_data(0x001014, 1)
+nvd9_gpc_mmio_tail:
+
+// TPC mmio lists
+nvc0_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 1)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x000750, 2)
+nvc0_tpc_mmio_tail:
+mmctx_data(0x000758, 1)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x0006e0, 1)
+nvcf_tpc_mmio_tail:
+mmctx_data(0x0004bc, 1)
+nvc3_tpc_mmio_tail:
+mmctx_data(0x000544, 1)
+nvc1_tpc_mmio_tail:
+
+nvd9_tpc_mmio_head:
+mmctx_data(0x000018, 1)
+mmctx_data(0x00003c, 1)
+mmctx_data(0x000048, 1)
+mmctx_data(0x000064, 1)
+mmctx_data(0x000088, 1)
+mmctx_data(0x000200, 6)
+mmctx_data(0x00021c, 2)
+mmctx_data(0x0002c4, 1)
+mmctx_data(0x000300, 6)
+mmctx_data(0x0003d0, 1)
+mmctx_data(0x0003e0, 2)
+mmctx_data(0x000400, 3)
+mmctx_data(0x000420, 3)
+mmctx_data(0x0004b0, 1)
+mmctx_data(0x0004e8, 1)
+mmctx_data(0x0004f4, 1)
+mmctx_data(0x000520, 2)
+mmctx_data(0x000544, 1)
+mmctx_data(0x000604, 4)
+mmctx_data(0x000644, 20)
+mmctx_data(0x000698, 1)
+mmctx_data(0x0006e0, 1)
+mmctx_data(0x000750, 3)
+nvd9_tpc_mmio_tail:
+
+.section #nvc0_grgpc_code
+bra #init
+define(`include_code')
+include(`nvc0_graph.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nvc0_graph.fuc)
+//
+error:
+ push $r14
+ mov $r14 -0x67ec // 0x9814
+ sethi $r14 0x400000
+ call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
+ add b32 $r14 0x41c
+ mov $r15 1
+ call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
+ pop $r14
+ ret
+
+// GPC fuc initialisation, executed by triggering ucode start, will
+// fall through to the main loop after completion.
+//
+// Input:
+// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+// CC_SCRATCH[1]: context base
+//
+// Output:
+// CC_SCRATCH[0]:
+// 31:31: set to signal completion
+// CC_SCRATCH[1]:
+// 31:0: GPC context size
+//
+init:
+ clear b32 $r0
+ mov $sp $r0
+
+ // enable fifo access
+ mov $r1 0x1200
+ mov $r2 2
+ iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+
+ // setup i0 handler, and route all interrupts to it
+ mov $r1 #ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+
+ // enable fifo interrupt
+ mov $r2 4
+ iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+
+ // enable interrupts
+ bset $flags ie0
+
+ // figure out which GPC we are, and how many TPCs we have
+ mov $r1 0x608
+ shl b32 $r1 6
+ iord $r2 I[$r1 + 0x000] // UNITS
+ mov $r3 1
+ and $r2 0x1f
+ shl b32 $r3 $r2
+ sub b32 $r3 1
+ st b32 D[$r0 + #tpc_count] $r2
+ st b32 D[$r0 + #tpc_mask] $r3
+ add b32 $r1 0x400
+ iord $r2 I[$r1 + 0x000] // MYINDEX
+ st b32 D[$r0 + #gpc_id] $r2
+
+ // find context data for this chipset
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
+ mov $r1 #chipsets - 12
+ init_find_chipset:
+ add b32 $r1 12
+ ld b32 $r3 D[$r1 + 0x00]
+ cmpu b32 $r3 $r2
+ bra e #init_context
+ cmpu b32 $r3 0
+ bra ne #init_find_chipset
+ // unknown chipset
+ ret
+
+ // initialise context base, and size tracking
+ init_context:
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
+ clear b32 $r3 // track GPC context size here
+
+ // set mmctx base addresses now so we don't have to do it later;
+ // they don't currently ever change
+ mov $r4 0x700
+ shl b32 $r4 6
+ shr b32 $r5 $r2 8
+ iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
+ iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
+
+ // calculate GPC mmio context size, store the chipset-specific
+ // mmio list pointers somewhere we can get at them later without
+ // re-parsing the chipset list
+ clear b32 $r14
+ clear b32 $r15
+ ld b16 $r14 D[$r1 + 4]
+ ld b16 $r15 D[$r1 + 6]
+ st b16 D[$r0 + #gpc_mmio_list_head] $r14
+ st b16 D[$r0 + #gpc_mmio_list_tail] $r15
+ call #mmctx_size
+ add b32 $r2 $r15
+ add b32 $r3 $r15
+
+ // calculate per-TPC mmio context size, store the list pointers
+ ld b16 $r14 D[$r1 + 8]
+ ld b16 $r15 D[$r1 + 10]
+ st b16 D[$r0 + #tpc_mmio_list_head] $r14
+ st b16 D[$r0 + #tpc_mmio_list_tail] $r15
+ call #mmctx_size
+ ld b32 $r14 D[$r0 + #tpc_count]
+ mulu $r14 $r15
+ add b32 $r2 $r14
+ add b32 $r3 $r14
+
+ // round up base/size to 256 byte boundary (for strand SWBASE)
+ add b32 $r4 0x1300
+ shr b32 $r3 2
+ iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
+ shr b32 $r2 8
+ shr b32 $r3 6
+ add b32 $r2 1
+ add b32 $r3 1
+ shl b32 $r2 8
+ shl b32 $r3 8
+
+ // calculate size of strand context data
+ mov b32 $r15 $r2
+ call #strand_ctx_init
+ add b32 $r3 $r15
+
+ // save context size, and tell HUB we're done
+ mov $r1 0x800
+ shl b32 $r1 6
+ iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
+ add b32 $r1 0x800
+ clear b32 $r2
+ bset $r2 31
+ iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple: sleeps until woken up by the interrupt
+// handler, then pulls a command from the queue and executes its handler
+//
+main:
+ bset $flags $p0
+ sleep $p0
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
+
+ // 0x0000-0x0003 are all context transfers
+ cmpu b32 $r14 0x04
+ bra nc #main_not_ctx_xfer
+ // fetch $flags and mask off $p1/$p2
+ mov $r1 $flags
+ mov $r2 0x0006
+ not b32 $r2
+ and $r1 $r2
+ // set $p1/$p2 according to transfer type
+ shl b32 $r14 1
+ or $r1 $r14
+ mov $flags $r1
+ // transfer context data
+ call #ctx_xfer
+ bra #main
+
+ main_not_ctx_xfer:
+ shl b32 $r15 $r14 16
+ or $r15 E_BAD_COMMAND
+ call #error
+ bra #main
+
+// interrupt handler
+ih:
+ push $r8
+ mov $r8 $flags
+ push $r8
+ push $r9
+ push $r10
+ push $r11
+ push $r13
+ push $r14
+ push $r15
+
+ // incoming fifo command?
+ iord $r10 I[$r0 + 0x200] // INTR
+ and $r11 $r10 0x00000004
+ bra e #ih_no_fifo
+ // queue incoming fifo command for later processing
+ mov $r11 0x1900
+ mov $r13 #cmd_queue
+ iord $r14 I[$r11 + 0x100] // FIFO_CMD
+ iord $r15 I[$r11 + 0x000] // FIFO_DATA
+ call #queue_put
+ add b32 $r11 0x400
+ mov $r14 1
+ iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+
+ // ack, and wake up main()
+ ih_no_fifo:
+ iowr I[$r0 + 0x100] $r10 // INTR_ACK
+
+ pop $r15
+ pop $r14
+ pop $r13
+ pop $r11
+ pop $r10
+ pop $r9
+ pop $r8
+ mov $flags $r8
+ pop $r8
+ bclr $flags $p0
+ iret
+
+// Set this GPC's bit in HUB_BAR, used to signal completion of various
+// activities to the HUB fuc
+//
+hub_barrier_done:
+ mov $r15 1
+ ld b32 $r14 D[$r0 + #gpc_id]
+ shl b32 $r15 $r14
+ mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
+ sethi $r14 0x400000
+ call #nv_wr32
+ ret
+
+// Disables various things, waits a bit, and re-enables them.
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off? Anyway, without this,
+// funny things happen.
+//
+ctx_redswitch:
+ mov $r14 0x614
+ shl b32 $r14 6
+ mov $r15 0x020
+ iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
+ mov $r15 8
+ ctx_redswitch_delay:
+ sub b32 $r15 1
+ bra ne #ctx_redswitch_delay
+ mov $r15 0xa20
+ iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
+ ret
+
+// Transfer GPC context data between GPU and storage area
+//
+// In: $r15 context base address
+// $p1 clear on save, set on load
+// $p2 set if opposite direction done/will be done, so:
+// on save it means: "a load will follow this save"
+// on load it means: "a save preceded this load"
+//
+ctx_xfer:
+ // set context base address
+ mov $r1 0xa04
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r15 // MEM_BASE
+ bra not $p1 #ctx_xfer_not_load
+ call #ctx_redswitch
+ ctx_xfer_not_load:
+
+ // strands
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xc
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
+ call #strand_wait
+ mov $r2 0x47fc
+ sethi $r2 0x20000
+ iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
+ xbit $r2 $flags $p1
+ add b32 $r2 3
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+ // mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 2 // first
+ mov $r11 0x0000
+ sethi $r11 0x500000
+ ld b32 $r12 D[$r0 + #gpc_id]
+ shl b32 $r12 15
+ add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
+ ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
+ mov $r14 0 // not multi
+ call #mmctx_xfer
+
+ // per-TPC mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 4 // last
+ mov $r11 0x4000
+ sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
+ ld b32 $r12 D[$r0 + #gpc_id]
+ shl b32 $r12 15
+ add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
+ ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
+ ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
+ ld b32 $r15 D[$r0 + #tpc_mask]
+ mov $r14 0x800 // stride = 0x800
+ call #mmctx_xfer
+
+ // wait for strands to finish
+ call #strand_wait
+
+ // if load, or a save without a load following, do some
+ // unknown stuff that's done after finishing a block of
+ // strand commands
+ bra $p1 #ctx_xfer_post
+ bra not $p2 #ctx_xfer_done
+ ctx_xfer_post:
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xd
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
+ call #strand_wait
+
+ // mark completion in HUB's barrier
+ ctx_xfer_done:
+ call #hub_barrier_done
+ ret
+
+.align 256
--- /dev/null
+uint32_t nvc0_grgpc_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x000000c0,
+ 0x012800c8,
+ 0x01e40194,
+ 0x000000c1,
+ 0x012c00c8,
+ 0x01f80194,
+ 0x000000c3,
+ 0x012800c8,
+ 0x01f40194,
+ 0x000000c4,
+ 0x012800c8,
+ 0x01f40194,
+ 0x000000c8,
+ 0x012800c8,
+ 0x01e40194,
+ 0x000000ce,
+ 0x012800c8,
+ 0x01f40194,
+ 0x000000cf,
+ 0x012800c8,
+ 0x01f00194,
+ 0x000000d9,
+ 0x0194012c,
+ 0x025401f8,
+ 0x00000000,
+ 0x00000380,
+ 0x14000400,
+ 0x20000450,
+ 0x00000600,
+ 0x00000684,
+ 0x10000700,
+ 0x00000800,
+ 0x08000808,
+ 0x00000828,
+ 0x00000830,
+ 0x000008d8,
+ 0x000008e0,
+ 0x140008e8,
+ 0x0000091c,
+ 0x08000924,
+ 0x00000b00,
+ 0x14000b08,
+ 0x00000bb8,
+ 0x00000c08,
+ 0x1c000c10,
+ 0x00000c80,
+ 0x00000c8c,
+ 0x08001000,
+ 0x00001014,
+ 0x00000c6c,
+ 0x00000380,
+ 0x04000400,
+ 0x0800040c,
+ 0x20000450,
+ 0x00000600,
+ 0x00000684,
+ 0x10000700,
+ 0x00000800,
+ 0x08000808,
+ 0x00000828,
+ 0x00000830,
+ 0x000008d8,
+ 0x000008e0,
+ 0x140008e8,
+ 0x0000091c,
+ 0x08000924,
+ 0x00000b00,
+ 0x14000b08,
+ 0x00000bb8,
+ 0x00000c08,
+ 0x1c000c10,
+ 0x00000c6c,
+ 0x00000c80,
+ 0x00000c8c,
+ 0x08001000,
+ 0x00001014,
+ 0x00000018,
+ 0x0000003c,
+ 0x00000048,
+ 0x00000064,
+ 0x00000088,
+ 0x14000200,
+ 0x0400021c,
+ 0x14000300,
+ 0x000003d0,
+ 0x040003e0,
+ 0x08000400,
+ 0x00000420,
+ 0x000004b0,
+ 0x000004e8,
+ 0x000004f4,
+ 0x04000520,
+ 0x0c000604,
+ 0x4c000644,
+ 0x00000698,
+ 0x04000750,
+ 0x00000758,
+ 0x000002c4,
+ 0x000006e0,
+ 0x000004bc,
+ 0x00000544,
+ 0x00000018,
+ 0x0000003c,
+ 0x00000048,
+ 0x00000064,
+ 0x00000088,
+ 0x14000200,
+ 0x0400021c,
+ 0x000002c4,
+ 0x14000300,
+ 0x000003d0,
+ 0x040003e0,
+ 0x08000400,
+ 0x08000420,
+ 0x000004b0,
+ 0x000004e8,
+ 0x000004f4,
+ 0x04000520,
+ 0x00000544,
+ 0x0c000604,
+ 0x4c000644,
+ 0x00000698,
+ 0x000006e0,
+ 0x08000750,
+};
+
+uint32_t nvc0_grgpc_code[] = {
+ 0x03060ef5,
+ 0x9800d898,
+ 0x86f001d9,
+ 0x0489b808,
+ 0xf00c1bf4,
+ 0x21f502f7,
+ 0x00f802ec,
+ 0xb60798c4,
+ 0x8dbb0384,
+ 0x0880b600,
+ 0x80008e80,
+ 0x90b6018f,
+ 0x0f94f001,
+ 0xf801d980,
+ 0x0131f400,
+ 0x9800d898,
+ 0x89b801d9,
+ 0x210bf404,
+ 0xb60789c4,
+ 0x9dbb0394,
+ 0x0890b600,
+ 0x98009e98,
+ 0x80b6019f,
+ 0x0f84f001,
+ 0xf400d880,
+ 0x00f80132,
+ 0x0728b7f1,
+ 0xb906b4b6,
+ 0xc9f002ec,
+ 0x00bcd01f,
+ 0xc800bccf,
+ 0x1bf41fcc,
+ 0x06a7f0fa,
+ 0x010321f5,
+ 0xf840bfcf,
+ 0x28b7f100,
+ 0x06b4b607,
+ 0xb980bfd0,
+ 0xc9f002ec,
+ 0x1ec9f01f,
+ 0xcf00bcd0,
+ 0xccc800bc,
+ 0xfa1bf41f,
+ 0x87f100f8,
+ 0x84b60430,
+ 0x1ff9f006,
+ 0xf8008fd0,
+ 0x3087f100,
+ 0x0684b604,
+ 0xf80080d0,
+ 0x3c87f100,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d000,
+ 0x081887f1,
+ 0xd00684b6,
+ 0x87f1008a,
+ 0x84b60400,
+ 0x0088cf06,
+ 0xf4888aff,
+ 0x87f1f31b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00099,
+ 0xf100f800,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00099f0,
+ 0x87f10089,
+ 0x84b60818,
+ 0x008ad006,
+ 0x040087f1,
+ 0xcf0684b6,
+ 0x8aff0088,
+ 0xf30bf488,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0099f094,
+ 0xf80089d0,
+ 0x9894bd00,
+ 0x85b600e8,
+ 0x0180b61a,
+ 0xbb0284b6,
+ 0xe0b60098,
+ 0x04efb804,
+ 0xb9eb1bf4,
+ 0x00f8029f,
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0199f094,
+ 0xf10089d0,
+ 0xb6071087,
+ 0x94bd0684,
+ 0xf405bbfd,
+ 0x8bd0090b,
+ 0x0099f000,
+ 0xf405eefd,
+ 0x8ed00c0b,
+ 0xc08fd080,
+ 0xb70199f0,
+ 0xc8010080,
+ 0xb4b600ab,
+ 0x0cb9f010,
+ 0xb601aec8,
+ 0xbefd11e4,
+ 0x008bd005,
+ 0xf0008ecf,
+ 0x0bf41fe4,
+ 0x00ce98fa,
+ 0xd005e9fd,
+ 0xc0b6c08e,
+ 0x04cdb804,
+ 0xc8e81bf4,
+ 0x1bf402ab,
+ 0x008bcf18,
+ 0xb01fb4f0,
+ 0x1bf410b4,
+ 0x02a7f0f7,
+ 0xf4c921f4,
+ 0xabc81b0e,
+ 0x10b4b600,
+ 0xf00cb9f0,
+ 0x8bd012b9,
+ 0x008bcf00,
+ 0xf412bbc8,
+ 0x87f1fa1b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00199,
+ 0xf900f800,
+ 0x02a7f0a0,
+ 0xfcc921f4,
+ 0xf100f8a0,
+ 0xf04afc87,
+ 0x97f00283,
+ 0x0089d00c,
+ 0x020721f5,
+ 0x87f100f8,
+ 0x83f04afc,
+ 0x0d97f002,
+ 0xf50089d0,
+ 0xf8020721,
+ 0xfca7f100,
+ 0x02a3f04f,
+ 0x0500aba2,
+ 0xd00fc7f0,
+ 0xc7f000ac,
+ 0x00bcd00b,
+ 0x020721f5,
+ 0xf000aed0,
+ 0xbcd00ac7,
+ 0x0721f500,
+ 0xf100f802,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x21f50089,
+ 0xe7f00213,
+ 0x3921f503,
+ 0xfca7f102,
+ 0x02a3f046,
+ 0x0400aba0,
+ 0xf040a0d0,
+ 0xbcd001c7,
+ 0x0721f500,
+ 0x010c9202,
+ 0xf000acd0,
+ 0xbcd002c7,
+ 0x0721f500,
+ 0x2621f502,
+ 0x8087f102,
+ 0x0684b608,
+ 0xb70089cf,
+ 0x95220080,
+ 0x8ed008fe,
+ 0x408ed000,
+ 0xb6808acf,
+ 0xa0b606a5,
+ 0x00eabb01,
+ 0xb60480b6,
+ 0x1bf40192,
+ 0x08e4b6e8,
+ 0xf1f2efbc,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x00f80089,
+ 0xe7f1e0f9,
+ 0xe3f09814,
+ 0x8d21f440,
+ 0x041ce0b7,
+ 0xf401f7f0,
+ 0xe0fc8d21,
+ 0x04bd00f8,
+ 0xf10004fe,
+ 0xf0120017,
+ 0x12d00227,
+ 0x3e17f100,
+ 0x0010fe04,
+ 0x040017f1,
+ 0xf0c010d0,
+ 0x12d00427,
+ 0x1031f400,
+ 0x060817f1,
+ 0xcf0614b6,
+ 0x37f00012,
+ 0x1f24f001,
+ 0xb60432bb,
+ 0x02800132,
+ 0x04038003,
+ 0x040010b7,
+ 0x800012cf,
+ 0x27f10002,
+ 0x24b60800,
+ 0x0022cf06,
+ 0xb65817f0,
+ 0x13980c10,
+ 0x0432b800,
+ 0xb00b0bf4,
+ 0x1bf40034,
+ 0xf100f8f1,
+ 0xb6080027,
+ 0x22cf0624,
+ 0xf134bd40,
+ 0xb6070047,
+ 0x25950644,
+ 0x0045d008,
+ 0xbd4045d0,
+ 0x58f4bde4,
+ 0x1f58021e,
+ 0x020e4003,
+ 0xf5040f40,
+ 0xbb013d21,
+ 0x3fbb002f,
+ 0x041e5800,
+ 0x40051f58,
+ 0x0f400a0e,
+ 0x3d21f50c,
+ 0x030e9801,
+ 0xbb00effd,
+ 0x3ebb002e,
+ 0x0040b700,
+ 0x0235b613,
+ 0xb60043d0,
+ 0x35b60825,
+ 0x0120b606,
+ 0xb60130b6,
+ 0x34b60824,
+ 0x022fb908,
+ 0x026321f5,
+ 0xf1003fbb,
+ 0xb6080017,
+ 0x13d00614,
+ 0x0010b740,
+ 0xf024bd08,
+ 0x12d01f29,
+ 0x0031f400,
+ 0xf00028f4,
+ 0x21f41cd7,
+ 0xf401f439,
+ 0xf404e4b0,
+ 0x81fe1e18,
+ 0x0627f001,
+ 0x12fd20bd,
+ 0x01e4b604,
+ 0xfe051efd,
+ 0x21f50018,
+ 0x0ef404c3,
+ 0x10ef94d3,
+ 0xf501f5f0,
+ 0xf402ec21,
+ 0x80f9c60e,
+ 0xf90188fe,
+ 0xf990f980,
+ 0xf9b0f9a0,
+ 0xf9e0f9d0,
+ 0x800acff0,
+ 0xf404abc4,
+ 0xb7f11d0b,
+ 0xd7f01900,
+ 0x40becf1c,
+ 0xf400bfcf,
+ 0xb0b70421,
+ 0xe7f00400,
+ 0x00bed001,
+ 0xfc400ad0,
+ 0xfce0fcf0,
+ 0xfcb0fcd0,
+ 0xfc90fca0,
+ 0x0088fe80,
+ 0x32f480fc,
+ 0xf001f800,
+ 0x0e9801f7,
+ 0x04febb00,
+ 0x9418e7f1,
+ 0xf440e3f0,
+ 0x00f88d21,
+ 0x0614e7f1,
+ 0xf006e4b6,
+ 0xefd020f7,
+ 0x08f7f000,
+ 0xf401f2b6,
+ 0xf7f1fd1b,
+ 0xefd00a20,
+ 0xf100f800,
+ 0xb60a0417,
+ 0x1fd00614,
+ 0x0711f400,
+ 0x04a421f5,
+ 0x4afc17f1,
+ 0xf00213f0,
+ 0x12d00c27,
+ 0x0721f500,
+ 0xfc27f102,
+ 0x0223f047,
+ 0xf00020d0,
+ 0x20b6012c,
+ 0x0012d003,
+ 0xf001acf0,
+ 0xb7f002a5,
+ 0x50b3f000,
+ 0xb6000c98,
+ 0xbcbb0fc4,
+ 0x010c9800,
+ 0xf0020d98,
+ 0x21f500e7,
+ 0xacf0015c,
+ 0x04a5f001,
+ 0x4000b7f1,
+ 0x9850b3f0,
+ 0xc4b6000c,
+ 0x00bcbb0f,
+ 0x98050c98,
+ 0x0f98060d,
+ 0x00e7f104,
+ 0x5c21f508,
+ 0x0721f501,
+ 0x0601f402,
+ 0xf11412f4,
+ 0xf04afc17,
+ 0x27f00213,
+ 0x0012d00d,
+ 0x020721f5,
+ 0x048f21f5,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
--- /dev/null
+/* fuc microcode for nvc0 PGRAPH/HUB
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+/* To build:
+ * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
+ */
+
+.section #nvc0_grhub_data
+include(`nvc0_graph.fuc')
+gpc_count: .b32 0
+rop_count: .b32 0
+cmd_queue: queue_init
+hub_mmio_list_head: .b32 0
+hub_mmio_list_tail: .b32 0
+
+ctx_current: .b32 0
+
+chipsets:
+.b8 0xc0 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8 0xc1 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc1_hub_mmio_tail
+.b8 0xc3 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8 0xc4 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8 0xc8 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8 0xce 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8 0xcf 0 0 0
+.b16 #nvc0_hub_mmio_head
+.b16 #nvc0_hub_mmio_tail
+.b8 0xd9 0 0 0
+.b16 #nvd9_hub_mmio_head
+.b16 #nvd9_hub_mmio_tail
+.b8 0 0 0 0
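+// each chipsets record above is 8 bytes: a chipset id padded to a 32-bit
+// word, then 16-bit pointers to the head and tail of that chipset's hub
+// mmio list; a zero id terminates the table, which is how the
+// init_find_chipset loop later in the code section walks it in steps of 8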
+
+nvc0_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 11)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404174, 3)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 2)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvc0_hub_mmio_tail:
+mmctx_data(0x4064c0, 2)
+nvc1_hub_mmio_tail:
+
+nvd9_hub_mmio_head:
+mmctx_data(0x17e91c, 2)
+mmctx_data(0x400204, 2)
+mmctx_data(0x404004, 10)
+mmctx_data(0x404044, 1)
+mmctx_data(0x404094, 14)
+mmctx_data(0x4040d0, 7)
+mmctx_data(0x4040f8, 1)
+mmctx_data(0x404130, 3)
+mmctx_data(0x404150, 3)
+mmctx_data(0x404164, 2)
+mmctx_data(0x404178, 2)
+mmctx_data(0x404200, 8)
+mmctx_data(0x404404, 14)
+mmctx_data(0x404460, 4)
+mmctx_data(0x404480, 1)
+mmctx_data(0x404498, 1)
+mmctx_data(0x404604, 4)
+mmctx_data(0x404618, 32)
+mmctx_data(0x404698, 21)
+mmctx_data(0x4046f0, 2)
+mmctx_data(0x404700, 22)
+mmctx_data(0x405800, 1)
+mmctx_data(0x405830, 3)
+mmctx_data(0x405854, 1)
+mmctx_data(0x405870, 4)
+mmctx_data(0x405a00, 2)
+mmctx_data(0x405a18, 1)
+mmctx_data(0x406020, 1)
+mmctx_data(0x406028, 4)
+mmctx_data(0x4064a8, 2)
+mmctx_data(0x4064b4, 5)
+mmctx_data(0x407804, 1)
+mmctx_data(0x40780c, 6)
+mmctx_data(0x4078bc, 1)
+mmctx_data(0x408000, 7)
+mmctx_data(0x408064, 1)
+mmctx_data(0x408800, 3)
+mmctx_data(0x408900, 4)
+mmctx_data(0x408980, 1)
+nvd9_hub_mmio_tail:
+
+.align 256
+chan_data:
+chan_mmio_count: .b32 0
+chan_mmio_address: .b32 0
+
+.align 256
+xfer_data: .b32 0
+
+.section #nvc0_grhub_code
+bra #init
+define(`include_code')
+include(`nvc0_graph.fuc')
+
+// reports an exception to the host
+//
+// In: $r15 error code (see nvc0_graph.fuc)
+//
+error:
+ push $r14
+ mov $r14 0x814
+ shl b32 $r14 6
+ iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
+ mov $r14 0xc1c
+ shl b32 $r14 6
+ mov $r15 1
+ iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
+ pop $r14
+ ret
+
+// HUB fuc initialisation, executed by triggering ucode start, will
+// fall through to the main loop after completion.
+//
+// Input:
+// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
+//
+// Output:
+// CC_SCRATCH[0]:
+// 31:31: set to signal completion
+// CC_SCRATCH[1]:
+// 31:0: total PGRAPH context size
+//
+init:
+ clear b32 $r0
+ mov $sp $r0
+ mov $xdbase $r0
+
+ // enable fifo access
+ mov $r1 0x1200
+ mov $r2 2
+ iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
+
+ // setup i0 handler, and route all interrupts to it
+ mov $r1 #ih
+ mov $iv0 $r1
+ mov $r1 0x400
+ iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
+
+ // route HUB_CHANNEL_SWITCH to fuc interrupt 8
+ mov $r3 0x404
+ shl b32 $r3 6
+ mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
+ iowr I[$r3 + 0x000] $r2
+
+ // not sure what these are, route them because NVIDIA does, and
+ // the IRQ handler will signal the host if we ever get one.. we
+ // may find out if/why we need to handle these if so..
+ //
+ mov $r2 0x2004
+ iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
+ mov $r2 0x200b
+ iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
+ mov $r2 0x200c
+ iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
+
+ // enable all INTR_UP interrupts
+ mov $r2 0xc24
+ shl b32 $r2 6
+ not b32 $r3 $r0
+ iowr I[$r2] $r3
+
+ // enable fifo, ctxsw, 9, 10, 15 interrupts
+ mov $r2 -0x78fc // 0x8704
+ sethi $r2 0
+ iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
+
+ // fifo level triggered, rest edge
+ sub b32 $r1 0x100
+ mov $r2 4
+ iowr I[$r1] $r2
+
+ // enable interrupts
+ bset $flags ie0
+
+ // fetch enabled GPC/ROP counts
+ mov $r14 -0x69fc // 0x409604
+ sethi $r14 0x400000
+ call #nv_rd32
+ extr $r1 $r15 16:20
+ st b32 D[$r0 + #rop_count] $r1
+ and $r15 0x1f
+ st b32 D[$r0 + #gpc_count] $r15
+
+ // set BAR_REQMASK to GPC mask
+ mov $r1 1
+ shl b32 $r1 $r15
+ sub b32 $r1 1
+ mov $r2 0x40c
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r1
+ iowr I[$r2 + 0x100] $r1
+
+ // find context data for this chipset
+ mov $r2 0x800
+ shl b32 $r2 6
+ iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
+ mov $r15 #chipsets - 8
+ init_find_chipset:
+ add b32 $r15 8
+ ld b32 $r3 D[$r15 + 0x00]
+ cmpu b32 $r3 $r2
+ bra e #init_context
+ cmpu b32 $r3 0
+ bra ne #init_find_chipset
+ // unknown chipset
+ ret
+
+ // context size calculation, reserve first 256 bytes for use by fuc
+ init_context:
+ mov $r1 256
+
+ // calculate size of mmio context data
+ ld b16 $r14 D[$r15 + 4]
+ ld b16 $r15 D[$r15 + 6]
+ sethi $r14 0
+ st b32 D[$r0 + #hub_mmio_list_head] $r14
+ st b32 D[$r0 + #hub_mmio_list_tail] $r15
+ call #mmctx_size
+
+ // set mmctx base addresses now so we don't have to do it later,
+ // they don't (currently) ever change
+ mov $r3 0x700
+ shl b32 $r3 6
+ shr b32 $r4 $r1 8
+ iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
+ iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
+ add b32 $r3 0x1300
+ add b32 $r1 $r15
+ shr b32 $r15 2
+ iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
+
+ // strands, base offset needs to be aligned to 256 bytes
+ shr b32 $r1 8
+ add b32 $r1 1
+ shl b32 $r1 8
+ mov b32 $r15 $r1
+ call #strand_ctx_init
+ add b32 $r1 $r15
+
+ // initialise each GPC in sequence by passing in the offset of its
+ // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
+ // has previously been uploaded by the host) running.
+ //
+ // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
+ // when it has completed, and return the size of its context data
+ // in GPCn_CC_SCRATCH[1]
+ //
+ ld b32 $r3 D[$r0 + #gpc_count]
+ mov $r4 0x2000
+ sethi $r4 0x500000
+ init_gpc:
+ // setup, and start GPC ucode running
+ add b32 $r14 $r4 0x804
+ mov b32 $r15 $r1
+ call #nv_wr32 // CC_SCRATCH[1] = ctx offset
+ add b32 $r14 $r4 0x800
+ mov b32 $r15 $r2
+ call #nv_wr32 // CC_SCRATCH[0] = chipset
+ add b32 $r14 $r4 0x10c
+ clear b32 $r15
+ call #nv_wr32
+ add b32 $r14 $r4 0x104
+ call #nv_wr32 // ENTRY
+ add b32 $r14 $r4 0x100
+ mov $r15 2 // CTRL_START_TRIGGER
+ call #nv_wr32 // CTRL
+
+ // wait for it to complete, and adjust context size
+ add b32 $r14 $r4 0x800
+ init_gpc_wait:
+ call #nv_rd32
+ xbit $r15 $r15 31
+ bra e #init_gpc_wait
+ add b32 $r14 $r4 0x804
+ call #nv_rd32
+ add b32 $r1 $r15
+
+ // next!
+ add b32 $r4 0x8000
+ sub b32 $r3 1
+ bra ne #init_gpc
+
+ // save context size, and tell host we're ready
+ mov $r2 0x800
+ shl b32 $r2 6
+ iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
+ add b32 $r2 0x800
+ clear b32 $r1
+ bset $r1 31
+ iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
+
+// Main program loop, very simple, sleeps until woken up by the interrupt
+// handler, pulls a command from the queue and executes its handler
+//
+main:
+ // sleep until we have something to do
+ bset $flags $p0
+ sleep $p0
+ mov $r13 #cmd_queue
+ call #queue_get
+ bra $p1 #main
+
+ // context switch, requested by GPU?
+ cmpu b32 $r14 0x4001
+ bra ne #main_not_ctx_switch
+ trace_set(T_AUTO)
+ mov $r1 0xb00
+ shl b32 $r1 6
+ iord $r2 I[$r1 + 0x100] // CHAN_NEXT
+ iord $r1 I[$r1 + 0x000] // CHAN_CUR
+
+ xbit $r3 $r1 31
+ bra e #chsw_no_prev
+ xbit $r3 $r2 31
+ bra e #chsw_prev_no_next
+ push $r2
+ mov b32 $r2 $r1
+ trace_set(T_SAVE)
+ bclr $flags $p1
+ bset $flags $p2
+ call #ctx_xfer
+ trace_clr(T_SAVE);
+ pop $r2
+ trace_set(T_LOAD);
+ bset $flags $p1
+ call #ctx_xfer
+ trace_clr(T_LOAD);
+ bra #chsw_done
+ chsw_prev_no_next:
+ push $r2
+ mov b32 $r2 $r1
+ bclr $flags $p1
+ bclr $flags $p2
+ call #ctx_xfer
+ pop $r2
+ mov $r1 0xb00
+ shl b32 $r1 6
+ iowr I[$r1] $r2
+ bra #chsw_done
+ chsw_no_prev:
+ xbit $r3 $r2 31
+ bra e #chsw_done
+ bset $flags $p1
+ bclr $flags $p2
+ call #ctx_xfer
+
+ // ack the context switch request
+ chsw_done:
+ mov $r1 0xb0c
+ shl b32 $r1 6
+ mov $r2 1
+ iowr I[$r1 + 0x000] $r2 // 0x409b0c
+ trace_clr(T_AUTO)
+ bra #main
+
+ // request to set current channel? (*not* a context switch)
+ main_not_ctx_switch:
+ cmpu b32 $r14 0x0001
+ bra ne #main_not_ctx_chan
+ mov b32 $r2 $r15
+ call #ctx_chan
+ bra #main_done
+
+ // request to store current channel context?
+ main_not_ctx_chan:
+ cmpu b32 $r14 0x0002
+ bra ne #main_not_ctx_save
+ trace_set(T_SAVE)
+ bclr $flags $p1
+ bclr $flags $p2
+ call #ctx_xfer
+ trace_clr(T_SAVE)
+ bra #main_done
+
+ main_not_ctx_save:
+ shl b32 $r15 $r14 16
+ or $r15 E_BAD_COMMAND
+ call #error
+ bra #main
+
+ main_done:
+ mov $r1 0x820
+ shl b32 $r1 6
+ clear b32 $r2
+ bset $r2 31
+ iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
+ bra #main
+
+// interrupt handler
+ih:
+ push $r8
+ mov $r8 $flags
+ push $r8
+ push $r9
+ push $r10
+ push $r11
+ push $r13
+ push $r14
+ push $r15
+
+ // incoming fifo command?
+ iord $r10 I[$r0 + 0x200] // INTR
+ and $r11 $r10 0x00000004
+ bra e #ih_no_fifo
+ // queue incoming fifo command for later processing
+ mov $r11 0x1900
+ mov $r13 #cmd_queue
+ iord $r14 I[$r11 + 0x100] // FIFO_CMD
+ iord $r15 I[$r11 + 0x000] // FIFO_DATA
+ call #queue_put
+ add b32 $r11 0x400
+ mov $r14 1
+ iowr I[$r11 + 0x000] $r14 // FIFO_ACK
+
+ // context switch request?
+ ih_no_fifo:
+ and $r11 $r10 0x00000100
+ bra e #ih_no_ctxsw
+ // enqueue a context switch for later processing
+ mov $r13 #cmd_queue
+ mov $r14 0x4001
+ call #queue_put
+
+ // anything we didn't handle, bring it to the host's attention
+ ih_no_ctxsw:
+ mov $r11 0x104
+ not b32 $r11
+ and $r11 $r10 $r11
+ bra e #ih_no_other
+ mov $r10 0xc1c
+ shl b32 $r10 6
+ iowr I[$r10] $r11 // INTR_UP_SET
+
+ // ack, and wake up main()
+ ih_no_other:
+ iowr I[$r0 + 0x100] $r10 // INTR_ACK
+
+ pop $r15
+ pop $r14
+ pop $r13
+ pop $r11
+ pop $r10
+ pop $r9
+ pop $r8
+ mov $flags $r8
+ pop $r8
+ bclr $flags $p0
+ iret
+
+// Not really sure, but MEM_CMD 7 will hang forever if this isn't done
+ctx_4160s:
+ mov $r14 0x4160
+ sethi $r14 0x400000
+ mov $r15 1
+ call #nv_wr32
+ ctx_4160s_wait:
+ call #nv_rd32
+ xbit $r15 $r15 4
+ bra e #ctx_4160s_wait
+ ret
+
+// Without clearing again at end of xfer, some things cause PGRAPH
+// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
+// still function with it set however...
+ctx_4160c:
+ mov $r14 0x4160
+ sethi $r14 0x400000
+ clear b32 $r15
+ call #nv_wr32
+ ret
+
+// Again, not really sure
+//
+// In: $r15 value to set 0x404170 to
+//
+ctx_4170s:
+ mov $r14 0x4170
+ sethi $r14 0x400000
+ or $r15 0x10
+ call #nv_wr32
+ ret
+
+// Waits for a ctx_4170s() call to complete
+//
+ctx_4170w:
+ mov $r14 0x4170
+ sethi $r14 0x400000
+ call #nv_rd32
+ and $r15 0x10
+ bra ne #ctx_4170w
+ ret
+
+// Disables various things, waits a bit, and re-enables them..
+//
+// Not sure how exactly this helps, perhaps "ENABLE" is not such a
+// good description for the bits we turn off? Anyway, without this,
+// funny things happen.
+//
+ctx_redswitch:
+ mov $r14 0x614
+ shl b32 $r14 6
+ mov $r15 0x270
+ iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
+ mov $r15 8
+ ctx_redswitch_delay:
+ sub b32 $r15 1
+ bra ne #ctx_redswitch_delay
+ mov $r15 0x770
+ iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
+ ret
+
+// Not a clue what this is for, except that unless the value is 0x10, the
+// strand context is saved (and presumably restored) incorrectly..
+//
+// In: $r15 value to set to (0x00/0x10 are used)
+//
+ctx_86c:
+ mov $r14 0x86c
+ shl b32 $r14 6
+ iowr I[$r14] $r15 // HUB(0x86c) = val
+ mov $r14 -0x75ec
+ sethi $r14 0x400000
+ call #nv_wr32 // ROP(0xa14) = val
+ mov $r14 -0x5794
+ sethi $r14 0x410000
+ call #nv_wr32 // GPC(0x86c) = val
+ ret
+
+// ctx_load - loads a channel's ctxctl data, and selects its vm
+//
+// In: $r2 channel address
+//
+ctx_load:
+ trace_set(T_CHAN)
+
+ // switch to channel, somewhat magic in parts..
+ mov $r10 12 // DONE_UNK12
+ call #wait_donez
+ mov $r1 0xa24
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r0 // 0x409a24
+ mov $r3 0xb00
+ shl b32 $r3 6
+ iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
+ mov $r1 0xa0c
+ shl b32 $r1 6
+ mov $r4 7
+ iowr I[$r1 + 0x000] $r2 // MEM_CHAN
+ iowr I[$r1 + 0x100] $r4 // MEM_CMD
+ ctx_chan_wait_0:
+ iord $r4 I[$r1 + 0x100]
+ and $r4 0x1f
+ bra ne #ctx_chan_wait_0
+ iowr I[$r3 + 0x000] $r2 // CHAN_CUR
+
+ // load channel header, fetch PGRAPH context pointer
+ mov $xtargets $r0
+ bclr $r2 31
+ shl b32 $r2 4
+ add b32 $r2 2
+
+ trace_set(T_LCHAN)
+ mov $r1 0xa04
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r2 // MEM_BASE
+ mov $r1 0xa20
+ shl b32 $r1 6
+ mov $r2 0x0002
+ sethi $r2 0x80000000
+ iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
+ mov $r1 0x10 // chan + 0x0210
+ mov $r2 #xfer_data
+ sethi $r2 0x00020000 // 16 bytes
+ xdld $r1 $r2
+ xdwait
+ trace_clr(T_LCHAN)
+
+ // update current context
+ ld b32 $r1 D[$r0 + #xfer_data + 4]
+ shl b32 $r1 24
+ ld b32 $r2 D[$r0 + #xfer_data + 0]
+ shr b32 $r2 8
+ or $r1 $r2
+ st b32 D[$r0 + #ctx_current] $r1
+
+ // set transfer base to start of context, and fetch context header
+ trace_set(T_LCTXH)
+ mov $r2 0xa04
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r1 // MEM_BASE
+ mov $r2 1
+ mov $r1 0xa20
+ shl b32 $r1 6
+ iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
+ mov $r1 #chan_data
+ sethi $r1 0x00060000 // 256 bytes
+ xdld $r0 $r1
+ xdwait
+ trace_clr(T_LCTXH)
+
+ trace_clr(T_CHAN)
+ ret
+
+// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
+// the active channel for ctxctl, but not actually transfer
+//           any context data. Intended for use only during initial
+// context construction.
+//
+// In: $r2 channel address
+//
+ctx_chan:
+ call #ctx_4160s
+ call #ctx_load
+ mov $r10 12 // DONE_UNK12
+ call #wait_donez
+ mov $r1 0xa10
+ shl b32 $r1 6
+ mov $r2 5
+ iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
+ ctx_chan_wait:
+ iord $r2 I[$r1 + 0x000]
+ or $r2 $r2
+ bra ne #ctx_chan_wait
+ call #ctx_4160c
+ ret
+
+// Execute per-context state overrides list
+//
+// Only executed on the first load of a channel. Might want to look into
+// removing this and having the host directly modify the channel's context
+// to change this state... The nouveau DRM already builds this list as
+// it's definitely needed for NVIDIA's, so we may as well use it for now
+//
+// Input: $r1 mmio list length
+//
+ctx_mmio_exec:
+ // set transfer base to be the mmio list
+ ld b32 $r3 D[$r0 + #chan_mmio_address]
+ mov $r2 0xa04
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r3 // MEM_BASE
+
+ clear b32 $r3
+ ctx_mmio_loop:
+ // fetch next 256 bytes of mmio list if necessary
+ and $r4 $r3 0xff
+ bra ne #ctx_mmio_pull
+ mov $r5 #xfer_data
+ sethi $r5 0x00060000 // 256 bytes
+ xdld $r3 $r5
+ xdwait
+
+ // execute a single list entry
+ ctx_mmio_pull:
+ ld b32 $r14 D[$r4 + #xfer_data + 0x00]
+ ld b32 $r15 D[$r4 + #xfer_data + 0x04]
+ call #nv_wr32
+
+ // next!
+ add b32 $r3 8
+ sub b32 $r1 1
+ bra ne #ctx_mmio_loop
+
+ // set transfer base back to the current context
+ ctx_mmio_done:
+ ld b32 $r3 D[$r0 + #ctx_current]
+ iowr I[$r2 + 0x000] $r3 // MEM_BASE
+
+ // disable the mmio list now, we don't need/want to execute it again
+ st b32 D[$r0 + #chan_mmio_count] $r0
+ mov $r1 #chan_data
+ sethi $r1 0x00060000 // 256 bytes
+ xdst $r0 $r1
+ xdwait
+ ret
+
+// Transfer HUB context data between GPU and storage area
+//
+// In: $r2 channel address
+// $p1 clear on save, set on load
+// $p2 set if opposite direction done/will be done, so:
+// on save it means: "a load will follow this save"
+//             on load it means: "a save preceded this load"
+//
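+// For reference, the combinations used by main(): a plain save has both
+// flags clear, a save that will be followed by a load of another channel
+// also sets $p2, and a load sets $p1 (with $p2 indicating whether a save
+// was done first).
+//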
+ctx_xfer:
+ bra not $p1 #ctx_xfer_pre
+ bra $p2 #ctx_xfer_pre_load
+ ctx_xfer_pre:
+ mov $r15 0x10
+ call #ctx_86c
+ call #ctx_4160s
+ bra not $p1 #ctx_xfer_exec
+
+ ctx_xfer_pre_load:
+ mov $r15 2
+ call #ctx_4170s
+ call #ctx_4170w
+ call #ctx_redswitch
+ clear b32 $r15
+ call #ctx_4170s
+ call #ctx_load
+
+ // fetch context pointer, and initiate xfer on all GPCs
+ ctx_xfer_exec:
+ ld b32 $r1 D[$r0 + #ctx_current]
+ mov $r2 0x414
+ shl b32 $r2 6
+ iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
+ mov $r14 -0x5b00
+ sethi $r14 0x410000
+ mov b32 $r15 $r1
+ call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
+ add b32 $r14 4
+ xbit $r15 $flags $p1
+ xbit $r2 $flags $p2
+ shl b32 $r2 1
+ or $r15 $r2
+ call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
+
+ // strands
+ mov $r1 0x4afc
+ sethi $r1 0x20000
+ mov $r2 0xc
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
+ call #strand_wait
+ mov $r2 0x47fc
+ sethi $r2 0x20000
+ iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
+ xbit $r2 $flags $p1
+ add b32 $r2 3
+ iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
+
+ // mmio context
+ xbit $r10 $flags $p1 // direction
+ or $r10 6 // first, last
+ mov $r11 0 // base = 0
+ ld b32 $r12 D[$r0 + #hub_mmio_list_head]
+ ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
+ mov $r14 0 // not multi
+ call #mmctx_xfer
+
+ // wait for GPCs to all complete
+ mov $r10 8 // DONE_BAR
+ call #wait_doneo
+
+ // wait for strand xfer to complete
+ call #strand_wait
+
+ // post-op
+ bra $p1 #ctx_xfer_post
+ mov $r10 12 // DONE_UNK12
+ call #wait_donez
+ mov $r1 0xa10
+ shl b32 $r1 6
+ mov $r2 5
+ iowr I[$r1] $r2 // MEM_CMD
+ ctx_xfer_post_save_wait:
+ iord $r2 I[$r1]
+ or $r2 $r2
+ bra ne #ctx_xfer_post_save_wait
+
+ bra $p2 #ctx_xfer_done
+ ctx_xfer_post:
+ mov $r15 2
+ call #ctx_4170s
+ clear b32 $r15
+ call #ctx_86c
+ call #strand_post
+ call #ctx_4170w
+ clear b32 $r15
+ call #ctx_4170s
+
+ bra not $p1 #ctx_xfer_no_post_mmio
+ ld b32 $r1 D[$r0 + #chan_mmio_count]
+ or $r1 $r1
+ bra e #ctx_xfer_no_post_mmio
+ call #ctx_mmio_exec
+
+ ctx_xfer_no_post_mmio:
+ call #ctx_4160c
+
+ ctx_xfer_done:
+ ret
+
+.align 256
--- /dev/null
+uint32_t nvc0_grhub_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x000000c0,
+ 0x013c00a0,
+ 0x000000c1,
+ 0x014000a0,
+ 0x000000c3,
+ 0x013c00a0,
+ 0x000000c4,
+ 0x013c00a0,
+ 0x000000c8,
+ 0x013c00a0,
+ 0x000000ce,
+ 0x013c00a0,
+ 0x000000cf,
+ 0x013c00a0,
+ 0x000000d9,
+ 0x01dc0140,
+ 0x00000000,
+ 0x0417e91c,
+ 0x04400204,
+ 0x28404004,
+ 0x00404044,
+ 0x34404094,
+ 0x184040d0,
+ 0x004040f8,
+ 0x08404130,
+ 0x08404150,
+ 0x04404164,
+ 0x08404174,
+ 0x1c404200,
+ 0x34404404,
+ 0x0c404460,
+ 0x00404480,
+ 0x00404498,
+ 0x0c404604,
+ 0x7c404618,
+ 0x50404698,
+ 0x044046f0,
+ 0x54404700,
+ 0x00405800,
+ 0x08405830,
+ 0x00405854,
+ 0x0c405870,
+ 0x04405a00,
+ 0x00405a18,
+ 0x00406020,
+ 0x0c406028,
+ 0x044064a8,
+ 0x044064b4,
+ 0x00407804,
+ 0x1440780c,
+ 0x004078bc,
+ 0x18408000,
+ 0x00408064,
+ 0x08408800,
+ 0x0c408900,
+ 0x00408980,
+ 0x044064c0,
+ 0x0417e91c,
+ 0x04400204,
+ 0x24404004,
+ 0x00404044,
+ 0x34404094,
+ 0x184040d0,
+ 0x004040f8,
+ 0x08404130,
+ 0x08404150,
+ 0x04404164,
+ 0x04404178,
+ 0x1c404200,
+ 0x34404404,
+ 0x0c404460,
+ 0x00404480,
+ 0x00404498,
+ 0x0c404604,
+ 0x7c404618,
+ 0x50404698,
+ 0x044046f0,
+ 0x54404700,
+ 0x00405800,
+ 0x08405830,
+ 0x00405854,
+ 0x0c405870,
+ 0x04405a00,
+ 0x00405a18,
+ 0x00406020,
+ 0x0c406028,
+ 0x044064a8,
+ 0x104064b4,
+ 0x00407804,
+ 0x1440780c,
+ 0x004078bc,
+ 0x18408000,
+ 0x00408064,
+ 0x08408800,
+ 0x0c408900,
+ 0x00408980,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
+
+uint32_t nvc0_grhub_code[] = {
+ 0x03090ef5,
+ 0x9800d898,
+ 0x86f001d9,
+ 0x0489b808,
+ 0xf00c1bf4,
+ 0x21f502f7,
+ 0x00f802ec,
+ 0xb60798c4,
+ 0x8dbb0384,
+ 0x0880b600,
+ 0x80008e80,
+ 0x90b6018f,
+ 0x0f94f001,
+ 0xf801d980,
+ 0x0131f400,
+ 0x9800d898,
+ 0x89b801d9,
+ 0x210bf404,
+ 0xb60789c4,
+ 0x9dbb0394,
+ 0x0890b600,
+ 0x98009e98,
+ 0x80b6019f,
+ 0x0f84f001,
+ 0xf400d880,
+ 0x00f80132,
+ 0x0728b7f1,
+ 0xb906b4b6,
+ 0xc9f002ec,
+ 0x00bcd01f,
+ 0xc800bccf,
+ 0x1bf41fcc,
+ 0x06a7f0fa,
+ 0x010321f5,
+ 0xf840bfcf,
+ 0x28b7f100,
+ 0x06b4b607,
+ 0xb980bfd0,
+ 0xc9f002ec,
+ 0x1ec9f01f,
+ 0xcf00bcd0,
+ 0xccc800bc,
+ 0xfa1bf41f,
+ 0x87f100f8,
+ 0x84b60430,
+ 0x1ff9f006,
+ 0xf8008fd0,
+ 0x3087f100,
+ 0x0684b604,
+ 0xf80080d0,
+ 0x3c87f100,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d000,
+ 0x081887f1,
+ 0xd00684b6,
+ 0x87f1008a,
+ 0x84b60400,
+ 0x0088cf06,
+ 0xf4888aff,
+ 0x87f1f31b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00099,
+ 0xf100f800,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00099f0,
+ 0x87f10089,
+ 0x84b60818,
+ 0x008ad006,
+ 0x040087f1,
+ 0xcf0684b6,
+ 0x8aff0088,
+ 0xf30bf488,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0099f094,
+ 0xf80089d0,
+ 0x9894bd00,
+ 0x85b600e8,
+ 0x0180b61a,
+ 0xbb0284b6,
+ 0xe0b60098,
+ 0x04efb804,
+ 0xb9eb1bf4,
+ 0x00f8029f,
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0199f094,
+ 0xf10089d0,
+ 0xb6071087,
+ 0x94bd0684,
+ 0xf405bbfd,
+ 0x8bd0090b,
+ 0x0099f000,
+ 0xf405eefd,
+ 0x8ed00c0b,
+ 0xc08fd080,
+ 0xb70199f0,
+ 0xc8010080,
+ 0xb4b600ab,
+ 0x0cb9f010,
+ 0xb601aec8,
+ 0xbefd11e4,
+ 0x008bd005,
+ 0xf0008ecf,
+ 0x0bf41fe4,
+ 0x00ce98fa,
+ 0xd005e9fd,
+ 0xc0b6c08e,
+ 0x04cdb804,
+ 0xc8e81bf4,
+ 0x1bf402ab,
+ 0x008bcf18,
+ 0xb01fb4f0,
+ 0x1bf410b4,
+ 0x02a7f0f7,
+ 0xf4c921f4,
+ 0xabc81b0e,
+ 0x10b4b600,
+ 0xf00cb9f0,
+ 0x8bd012b9,
+ 0x008bcf00,
+ 0xf412bbc8,
+ 0x87f1fa1b,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00199,
+ 0xf900f800,
+ 0x02a7f0a0,
+ 0xfcc921f4,
+ 0xf100f8a0,
+ 0xf04afc87,
+ 0x97f00283,
+ 0x0089d00c,
+ 0x020721f5,
+ 0x87f100f8,
+ 0x83f04afc,
+ 0x0d97f002,
+ 0xf50089d0,
+ 0xf8020721,
+ 0xfca7f100,
+ 0x02a3f04f,
+ 0x0500aba2,
+ 0xd00fc7f0,
+ 0xc7f000ac,
+ 0x00bcd00b,
+ 0x020721f5,
+ 0xf000aed0,
+ 0xbcd00ac7,
+ 0x0721f500,
+ 0xf100f802,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x21f50089,
+ 0xe7f00213,
+ 0x3921f503,
+ 0xfca7f102,
+ 0x02a3f046,
+ 0x0400aba0,
+ 0xf040a0d0,
+ 0xbcd001c7,
+ 0x0721f500,
+ 0x010c9202,
+ 0xf000acd0,
+ 0xbcd002c7,
+ 0x0721f500,
+ 0x2621f502,
+ 0x8087f102,
+ 0x0684b608,
+ 0xb70089cf,
+ 0x95220080,
+ 0x8ed008fe,
+ 0x408ed000,
+ 0xb6808acf,
+ 0xa0b606a5,
+ 0x00eabb01,
+ 0xb60480b6,
+ 0x1bf40192,
+ 0x08e4b6e8,
+ 0xf1f2efbc,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00399f0,
+ 0x00f80089,
+ 0xe7f1e0f9,
+ 0xe4b60814,
+ 0x00efd006,
+ 0x0c1ce7f1,
+ 0xf006e4b6,
+ 0xefd001f7,
+ 0xf8e0fc00,
+ 0xfe04bd00,
+ 0x07fe0004,
+ 0x0017f100,
+ 0x0227f012,
+ 0xf10012d0,
+ 0xfe05b917,
+ 0x17f10010,
+ 0x10d00400,
+ 0x0437f1c0,
+ 0x0634b604,
+ 0x200327f1,
+ 0xf10032d0,
+ 0xd0200427,
+ 0x27f10132,
+ 0x32d0200b,
+ 0x0c27f102,
+ 0x0732d020,
+ 0x0c2427f1,
+ 0xb90624b6,
+ 0x23d00003,
+ 0x0427f100,
+ 0x0023f087,
+ 0xb70012d0,
+ 0xf0010012,
+ 0x12d00427,
+ 0x1031f400,
+ 0x9604e7f1,
+ 0xf440e3f0,
+ 0xf1c76821,
+ 0x01018090,
+ 0x801ff4f0,
+ 0x17f0000f,
+ 0x041fbb01,
+ 0xf10112b6,
+ 0xb6040c27,
+ 0x21d00624,
+ 0x4021d000,
+ 0x080027f1,
+ 0xcf0624b6,
+ 0xf7f00022,
+ 0x08f0b654,
+ 0xb800f398,
+ 0x0bf40432,
+ 0x0034b00b,
+ 0xf8f11bf4,
+ 0x0017f100,
+ 0x02fe5801,
+ 0xf003ff58,
+ 0x0e8000e3,
+ 0x150f8014,
+ 0x013d21f5,
+ 0x070037f1,
+ 0x950634b6,
+ 0x34d00814,
+ 0x4034d000,
+ 0x130030b7,
+ 0xb6001fbb,
+ 0x3fd002f5,
+ 0x0815b600,
+ 0xb60110b6,
+ 0x1fb90814,
+ 0x6321f502,
+ 0x001fbb02,
+ 0xf1000398,
+ 0xf0200047,
+ 0x4ea05043,
+ 0x1fb90804,
+ 0x8d21f402,
+ 0x08004ea0,
+ 0xf4022fb9,
+ 0x4ea08d21,
+ 0xf4bd010c,
+ 0xa08d21f4,
+ 0xf401044e,
+ 0x4ea08d21,
+ 0xf7f00100,
+ 0x8d21f402,
+ 0x08004ea0,
+ 0xc86821f4,
+ 0x0bf41fff,
+ 0x044ea0fa,
+ 0x6821f408,
+ 0xb7001fbb,
+ 0xb6800040,
+ 0x1bf40132,
+ 0x0027f1b4,
+ 0x0624b608,
+ 0xb74021d0,
+ 0xbd080020,
+ 0x1f19f014,
+ 0xf40021d0,
+ 0x28f40031,
+ 0x08d7f000,
+ 0xf43921f4,
+ 0xe4b1f401,
+ 0x1bf54001,
+ 0x87f100d1,
+ 0x84b6083c,
+ 0xf094bd06,
+ 0x89d00499,
+ 0x0017f100,
+ 0x0614b60b,
+ 0xcf4012cf,
+ 0x13c80011,
+ 0x7e0bf41f,
+ 0xf41f23c8,
+ 0x20f95a0b,
+ 0xf10212b9,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00799f0,
+ 0x32f40089,
+ 0x0231f401,
+ 0x082921f5,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0799f094,
+ 0xfc0089d0,
+ 0x3c87f120,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d006,
+ 0xf50131f4,
+ 0xf1082921,
+ 0xb6085c87,
+ 0x94bd0684,
+ 0xd00699f0,
+ 0x0ef40089,
+ 0xb920f931,
+ 0x32f40212,
+ 0x0232f401,
+ 0x082921f5,
+ 0x17f120fc,
+ 0x14b60b00,
+ 0x0012d006,
+ 0xc8130ef4,
+ 0x0bf41f23,
+ 0x0131f40d,
+ 0xf50232f4,
+ 0xf1082921,
+ 0xb60b0c17,
+ 0x27f00614,
+ 0x0012d001,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0499f094,
+ 0xf50089d0,
+ 0xb0ff200e,
+ 0x1bf401e4,
+ 0x02f2b90d,
+ 0x07b521f5,
+ 0xb0420ef4,
+ 0x1bf402e4,
+ 0x3c87f12e,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d007,
+ 0xf40132f4,
+ 0x21f50232,
+ 0x87f10829,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00799,
+ 0x110ef400,
+ 0xf010ef94,
+ 0x21f501f5,
+ 0x0ef502ec,
+ 0x17f1fed1,
+ 0x14b60820,
+ 0xf024bd06,
+ 0x12d01f29,
+ 0xbe0ef500,
+ 0xfe80f9fe,
+ 0x80f90188,
+ 0xa0f990f9,
+ 0xd0f9b0f9,
+ 0xf0f9e0f9,
+ 0xc4800acf,
+ 0x0bf404ab,
+ 0x00b7f11d,
+ 0x08d7f019,
+ 0xcf40becf,
+ 0x21f400bf,
+ 0x00b0b704,
+ 0x01e7f004,
+ 0xe400bed0,
+ 0xf40100ab,
+ 0xd7f00d0b,
+ 0x01e7f108,
+ 0x0421f440,
+ 0x0104b7f1,
+ 0xabffb0bd,
+ 0x0d0bf4b4,
+ 0x0c1ca7f1,
+ 0xd006a4b6,
+ 0x0ad000ab,
+ 0xfcf0fc40,
+ 0xfcd0fce0,
+ 0xfca0fcb0,
+ 0xfe80fc90,
+ 0x80fc0088,
+ 0xf80032f4,
+ 0x60e7f101,
+ 0x40e3f041,
+ 0xf401f7f0,
+ 0x21f48d21,
+ 0x04ffc868,
+ 0xf8fa0bf4,
+ 0x60e7f100,
+ 0x40e3f041,
+ 0x21f4f4bd,
+ 0xf100f88d,
+ 0xf04170e7,
+ 0xf5f040e3,
+ 0x8d21f410,
+ 0xe7f100f8,
+ 0xe3f04170,
+ 0x6821f440,
+ 0xf410f4f0,
+ 0x00f8f31b,
+ 0x0614e7f1,
+ 0xf106e4b6,
+ 0xd00270f7,
+ 0xf7f000ef,
+ 0x01f2b608,
+ 0xf1fd1bf4,
+ 0xd00770f7,
+ 0x00f800ef,
+ 0x086ce7f1,
+ 0xd006e4b6,
+ 0xe7f100ef,
+ 0xe3f08a14,
+ 0x8d21f440,
+ 0xa86ce7f1,
+ 0xf441e3f0,
+ 0x00f88d21,
+ 0x083c87f1,
+ 0xbd0684b6,
+ 0x0599f094,
+ 0xf00089d0,
+ 0x21f40ca7,
+ 0x2417f1c9,
+ 0x0614b60a,
+ 0xf10010d0,
+ 0xb60b0037,
+ 0x32d00634,
+ 0x0c17f140,
+ 0x0614b60a,
+ 0xd00747f0,
+ 0x14d00012,
+ 0x4014cf40,
+ 0xf41f44f0,
+ 0x32d0fa1b,
+ 0x000bfe00,
+ 0xb61f2af0,
+ 0x20b60424,
+ 0x3c87f102,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d008,
+ 0x0a0417f1,
+ 0xd00614b6,
+ 0x17f10012,
+ 0x14b60a20,
+ 0x0227f006,
+ 0x800023f1,
+ 0xf00012d0,
+ 0x27f11017,
+ 0x23f00300,
+ 0x0512fa02,
+ 0x87f103f8,
+ 0x84b6085c,
+ 0xf094bd06,
+ 0x89d00899,
+ 0xc1019800,
+ 0x981814b6,
+ 0x25b6c002,
+ 0x0512fd08,
+ 0xf1160180,
+ 0xb6083c87,
+ 0x94bd0684,
+ 0xd00999f0,
+ 0x27f10089,
+ 0x24b60a04,
+ 0x0021d006,
+ 0xf10127f0,
+ 0xb60a2017,
+ 0x12d00614,
+ 0x0017f100,
+ 0x0613f002,
+ 0xf80501fa,
+ 0x5c87f103,
+ 0x0684b608,
+ 0x99f094bd,
+ 0x0089d009,
+ 0x085c87f1,
+ 0xbd0684b6,
+ 0x0599f094,
+ 0xf80089d0,
+ 0x3121f500,
+ 0xb821f506,
+ 0x0ca7f006,
+ 0xf1c921f4,
+ 0xb60a1017,
+ 0x27f00614,
+ 0x0012d005,
+ 0xfd0012cf,
+ 0x1bf40522,
+ 0x4921f5fa,
+ 0x9800f806,
+ 0x27f18103,
+ 0x24b60a04,
+ 0x0023d006,
+ 0x34c434bd,
+ 0x0f1bf4ff,
+ 0x030057f1,
+ 0xfa0653f0,
+ 0x03f80535,
+ 0x98c04e98,
+ 0x21f4c14f,
+ 0x0830b68d,
+ 0xf40112b6,
+ 0x0398df1b,
+ 0x0023d016,
+ 0xf1800080,
+ 0xf0020017,
+ 0x01fa0613,
+ 0xf803f806,
+ 0x0611f400,
+ 0xf01102f4,
+ 0x21f510f7,
+ 0x21f50698,
+ 0x11f40631,
+ 0x02f7f01c,
+ 0x065721f5,
+ 0x066621f5,
+ 0x067821f5,
+ 0x21f5f4bd,
+ 0x21f50657,
+ 0x019806b8,
+ 0x1427f116,
+ 0x0624b604,
+ 0xf10020d0,
+ 0xf0a500e7,
+ 0x1fb941e3,
+ 0x8d21f402,
+ 0xf004e0b6,
+ 0x2cf001fc,
+ 0x0124b602,
+ 0xf405f2fd,
+ 0x17f18d21,
+ 0x13f04afc,
+ 0x0c27f002,
+ 0xf50012d0,
+ 0xf1020721,
+ 0xf047fc27,
+ 0x20d00223,
+ 0x012cf000,
+ 0xd00320b6,
+ 0xacf00012,
+ 0x06a5f001,
+ 0x9800b7f0,
+ 0x0d98140c,
+ 0x00e7f015,
+ 0x015c21f5,
+ 0xf508a7f0,
+ 0xf5010321,
+ 0xf4020721,
+ 0xa7f02201,
+ 0xc921f40c,
+ 0x0a1017f1,
+ 0xf00614b6,
+ 0x12d00527,
+ 0x0012cf00,
+ 0xf40522fd,
+ 0x02f4fa1b,
+ 0x02f7f032,
+ 0x065721f5,
+ 0x21f5f4bd,
+ 0x21f50698,
+ 0x21f50226,
+ 0xf4bd0666,
+ 0x065721f5,
+ 0x981011f4,
+ 0x11fd8001,
+ 0x070bf405,
+ 0x07df21f5,
+ 0x064921f5,
+ 0x000000f8,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+};
--- /dev/null
+/* fuc microcode util functions for nvc0 PGRAPH
+ *
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
+define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
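+// As a worked example of the macros above: mmctx_data packs (count - 1)
+// into the top bits (mmctx_size shifts them back out with "shr b32 $r8 26")
+// and the register address into the low bits, so mmctx_data(0x404004, 11)
+// becomes .b32 0x28404004, the same value that appears in the generated
+// nvc0_grhub_data[] header.  queue_init reserves 8 bytes for the GET/PUT
+// pointers plus eight 8-byte command/data slots (72 bytes total) for the
+// queue operated on by queue_put/queue_get below.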
+
+ifdef(`include_code', `
+// Error codes
+define(`E_BAD_COMMAND', 0x01)
+define(`E_CMD_OVERFLOW', 0x02)
+
+// Util macros to help with debugging ucode hangs etc
+define(`T_WAIT', 0)
+define(`T_MMCTX', 1)
+define(`T_STRWAIT', 2)
+define(`T_STRINIT', 3)
+define(`T_AUTO', 4)
+define(`T_CHAN', 5)
+define(`T_LOAD', 6)
+define(`T_SAVE', 7)
+define(`T_LCHAN', 8)
+define(`T_LCTXH', 9)
+
+define(`trace_set', `
+ mov $r8 0x83c
+ shl b32 $r8 6
+ clear b32 $r9
+ bset $r9 $1
+ iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
+')
+
+define(`trace_clr', `
+ mov $r8 0x85c
+ shl b32 $r8 6
+ clear b32 $r9
+ bset $r9 $1
+ iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
+')
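+// trace_set/trace_clr are used throughout to set and clear a single bit
+// (numbered by the T_* codes above) in CC_SCRATCH[7], via what appear to
+// be its set and clear aliases at 0x83c and 0x85c, giving the host a crude
+// indication of which phase the ucode is in when debugging hangs.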
+
+// queue_put - add request to queue
+//
+// In : $r13 queue pointer
+// $r14 command
+// $r15 data
+//
+queue_put:
+ // make sure we have space..
+ ld b32 $r8 D[$r13 + 0x0] // GET
+ ld b32 $r9 D[$r13 + 0x4] // PUT
+ xor $r8 8
+ cmpu b32 $r8 $r9
+ bra ne #queue_put_next
+ mov $r15 E_CMD_OVERFLOW
+ call #error
+ ret
+
+ // store cmd/data on queue
+ queue_put_next:
+ and $r8 $r9 7
+ shl b32 $r8 3
+ add b32 $r8 $r13
+ add b32 $r8 8
+ st b32 D[$r8 + 0x0] $r14
+ st b32 D[$r8 + 0x4] $r15
+
+ // update PUT
+ add b32 $r9 1
+ and $r9 0xf
+ st b32 D[$r13 + 0x4] $r9
+ ret
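+// note: GET/PUT count modulo 16 while the ring only has 8 slots (the index
+// is masked with 7 on access), which is what lets the "xor $r8 8" test
+// above distinguish a full queue (PUT eight entries ahead of GET) from an
+// empty one (PUT == GET, as tested in queue_get)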
+
+// queue_get - fetch request from queue
+//
+// In : $r13 queue pointer
+//
+// Out: $p1 clear on success (data available)
+// $r14 command
+// $r15 data
+//
+queue_get:
+ bset $flags $p1
+ ld b32 $r8 D[$r13 + 0x0] // GET
+ ld b32 $r9 D[$r13 + 0x4] // PUT
+ cmpu b32 $r8 $r9
+ bra e #queue_get_done
+ // fetch first cmd/data pair
+ and $r9 $r8 7
+ shl b32 $r9 3
+ add b32 $r9 $r13
+ add b32 $r9 8
+ ld b32 $r14 D[$r9 + 0x0]
+ ld b32 $r15 D[$r9 + 0x4]
+
+ // update GET
+ add b32 $r8 1
+ and $r8 0xf
+ st b32 D[$r13 + 0x0] $r8
+ bclr $flags $p1
+queue_get_done:
+ ret
+
+// nv_rd32 - read 32-bit value from nv register
+//
+// In : $r14 register
+// Out: $r15 value
+//
+nv_rd32:
+ mov $r11 0x728
+ shl b32 $r11 6
+ mov b32 $r12 $r14
+ bset $r12 31 // MMIO_CTRL_PENDING
+ iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_rd32_wait:
+ iord $r12 I[$r11 + 0x000]
+ xbit $r12 $r12 31
+ bra ne #nv_rd32_wait
+ mov $r10 6 // DONE_MMIO_RD
+ call #wait_doneo
+ iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
+ ret
+
+// nv_wr32 - write 32-bit value to nv register
+//
+// In : $r14 register
+// $r15 value
+//
+nv_wr32:
+ mov $r11 0x728
+ shl b32 $r11 6
+ iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
+ mov b32 $r12 $r14
+ bset $r12 31 // MMIO_CTRL_PENDING
+ bset $r12 30 // MMIO_CTRL_WRITE
+ iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
+ nv_wr32_wait:
+ iord $r12 I[$r11 + 0x000]
+ xbit $r12 $r12 31
+ bra ne #nv_wr32_wait
+ ret
+
+// (re)set watchdog timer
+//
+// In : $r15 timeout
+//
+watchdog_reset:
+ mov $r8 0x430
+ shl b32 $r8 6
+ bset $r15 31
+ iowr I[$r8 + 0x000] $r15
+ ret
+
+// clear watchdog timer
+watchdog_clear:
+ mov $r8 0x430
+ shl b32 $r8 6
+ iowr I[$r8 + 0x000] $r0
+ ret
+
+// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
+//
+// In : $r10 bit to wait on
+//
+define(`wait_done', `
+$1:
+ trace_set(T_WAIT);
+ mov $r8 0x818
+ shl b32 $r8 6
+ iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
+ wait_done_$1:
+ mov $r8 0x400
+ shl b32 $r8 6
+ iord $r8 I[$r8 + 0x000] // DONE
+ xbit $r8 $r8 $r10
+ bra $2 #wait_done_$1
+ trace_clr(T_WAIT)
+ ret
+')
+wait_done(wait_donez, ne)
+wait_done(wait_doneo, e)
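+// as a worked expansion: wait_done(wait_donez, ne) produces a routine
+// labelled #wait_donez whose loop keeps branching back while the selected
+// DONE bit is still set (bra ne), i.e. it returns once the bit is clear;
+// wait_done(wait_doneo, e) is the same routine waiting for the bit to
+// become set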
+
+// mmctx_size - determine size of a mmio list transfer
+//
+// In : $r14 mmio list head
+// $r15 mmio list tail
+// Out: $r15 transfer size (in bytes)
+//
+mmctx_size:
+ clear b32 $r9
+ nv_mmctx_size_loop:
+ ld b32 $r8 D[$r14]
+ shr b32 $r8 26
+ add b32 $r8 1
+ shl b32 $r8 2
+ add b32 $r9 $r8
+ add b32 $r14 4
+ cmpu b32 $r14 $r15
+ bra ne #nv_mmctx_size_loop
+ mov b32 $r15 $r9
+ ret
+
+// mmctx_xfer - execute a list of mmio transfers
+//
+// In : $r10 flags
+// bit 0: direction (0 = save, 1 = load)
+// bit 1: set if first transfer
+// bit 2: set if last transfer
+// $r11 base
+// $r12 mmio list head
+// $r13 mmio list tail
+// $r14 multi_stride
+// $r15 multi_mask
+//
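+// (for concrete callers, see ctx_xfer in the HUB code, which passes base 0
+//  and $r14 = 0 for a single non-multi range, and the GPC ucode, which
+//  passes a 0x800 TPC stride in $r14 and the TPC mask in $r15)
+//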
+mmctx_xfer:
+ trace_set(T_MMCTX)
+ mov $r8 0x710
+ shl b32 $r8 6
+ clear b32 $r9
+ or $r11 $r11
+ bra e #mmctx_base_disabled
+ iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
+ bset $r9 0 // BASE_EN
+ mmctx_base_disabled:
+ or $r14 $r14
+ bra e #mmctx_multi_disabled
+ iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
+ iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
+ bset $r9 1 // MULTI_EN
+ mmctx_multi_disabled:
+ add b32 $r8 0x100
+
+ xbit $r11 $r10 0
+ shl b32 $r11 16 // DIR
+ bset $r11 12 // QLIMIT = 0x10
+ xbit $r14 $r10 1
+ shl b32 $r14 17
+ or $r11 $r14 // START_TRIGGER
+ iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+
+ // loop over the mmio list, and send requests to the hw
+ mmctx_exec_loop:
+ // wait for space in mmctx queue
+ mmctx_wait_free:
+ iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
+ and $r14 0x1f
+ bra e #mmctx_wait_free
+
+ // queue up an entry
+ ld b32 $r14 D[$r12]
+ or $r14 $r9
+ iowr I[$r8 + 0x300] $r14
+ add b32 $r12 4
+ cmpu b32 $r12 $r13
+ bra ne #mmctx_exec_loop
+
+ xbit $r11 $r10 2
+ bra ne #mmctx_stop
+ // wait for queue to empty
+ mmctx_fini_wait:
+ iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ and $r11 0x1f
+ cmpu b32 $r11 0x10
+ bra ne #mmctx_fini_wait
+ mov $r10 2 // DONE_MMCTX
+ call #wait_donez
+ bra #mmctx_done
+ mmctx_stop:
+ xbit $r11 $r10 0
+ shl b32 $r11 16 // DIR
+ bset $r11 12 // QLIMIT = 0x10
+ bset $r11 18 // STOP_TRIGGER
+ iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
+ mmctx_stop_wait:
+ // wait for STOP_TRIGGER to clear
+ iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
+ xbit $r11 $r11 18
+ bra ne #mmctx_stop_wait
+ mmctx_done:
+ trace_clr(T_MMCTX)
+ ret
+
+// Wait for DONE_STRAND
+//
+strand_wait:
+ push $r10
+ mov $r10 2
+ call #wait_donez
+ pop $r10
+ ret
+
+// unknown - call before issuing strand commands
+//
+strand_pre:
+ mov $r8 0x4afc
+ sethi $r8 0x20000
+ mov $r9 0xc
+ iowr I[$r8] $r9
+ call #strand_wait
+ ret
+
+// unknown - call after issuing strand commands
+//
+strand_post:
+ mov $r8 0x4afc
+ sethi $r8 0x20000
+ mov $r9 0xd
+ iowr I[$r8] $r9
+ call #strand_wait
+ ret
+
+// Selects strand set?!
+//
+// In: $r14 id
+//
+strand_set:
+ mov $r10 0x4ffc
+ sethi $r10 0x20000
+ sub b32 $r11 $r10 0x500
+ mov $r12 0xf
+ iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
+ mov $r12 0xb
+ iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
+ call #strand_wait
+ iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
+ mov $r12 0xa
+ iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
+ call #strand_wait
+ ret
+
+// Initialise strand context data
+//
+// In : $r15 context base
+// Out: $r15 context size (in bytes)
+//
+// Strandset(?) 3 hardcoded currently
+//
+strand_ctx_init:
+ trace_set(T_STRINIT)
+ call #strand_pre
+ mov $r14 3
+ call #strand_set
+ mov $r10 0x46fc
+ sethi $r10 0x20000
+ add b32 $r11 $r10 0x400
+ iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
+ mov $r12 1
+ iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
+ call #strand_wait
+ sub b32 $r12 $r0 1
+ iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
+ mov $r12 2
+ iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
+ call #strand_wait
+ call #strand_post
+
+ // read the size of each strand, poke the context offset of
+ // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
+	// about it later.
+ mov $r8 0x880
+ shl b32 $r8 6
+ iord $r9 I[$r8 + 0x000] // STRANDS
+ add b32 $r8 0x2200
+ shr b32 $r14 $r15 8
+ ctx_init_strand_loop:
+ iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
+ iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
+ iord $r10 I[$r8 + 0x200] // STRAND_SIZE
+ shr b32 $r10 6
+ add b32 $r10 1
+ add b32 $r14 $r10
+ add b32 $r8 4
+ sub b32 $r9 1
+ bra ne #ctx_init_strand_loop
+
+ shl b32 $r14 8
+ sub b32 $r15 $r14 $r15
+ trace_clr(T_STRINIT)
+ ret
+')
--- /dev/null
+/*
+ * Copyright 2007 Stephane Marchesin
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include "nouveau_util.h"
+#include <core/ramht.h>
+
+struct nv04_graph_engine {
+ struct nouveau_exec_engine base;
+};
+
+static uint32_t nv04_graph_ctx_regs[] = {
+ 0x0040053c,
+ 0x00400544,
+ 0x00400540,
+ 0x00400548,
+ NV04_PGRAPH_CTX_SWITCH1,
+ NV04_PGRAPH_CTX_SWITCH2,
+ NV04_PGRAPH_CTX_SWITCH3,
+ NV04_PGRAPH_CTX_SWITCH4,
+ NV04_PGRAPH_CTX_CACHE1,
+ NV04_PGRAPH_CTX_CACHE2,
+ NV04_PGRAPH_CTX_CACHE3,
+ NV04_PGRAPH_CTX_CACHE4,
+ 0x00400184,
+ 0x004001a4,
+ 0x004001c4,
+ 0x004001e4,
+ 0x00400188,
+ 0x004001a8,
+ 0x004001c8,
+ 0x004001e8,
+ 0x0040018c,
+ 0x004001ac,
+ 0x004001cc,
+ 0x004001ec,
+ 0x00400190,
+ 0x004001b0,
+ 0x004001d0,
+ 0x004001f0,
+ 0x00400194,
+ 0x004001b4,
+ 0x004001d4,
+ 0x004001f4,
+ 0x00400198,
+ 0x004001b8,
+ 0x004001d8,
+ 0x004001f8,
+ 0x0040019c,
+ 0x004001bc,
+ 0x004001dc,
+ 0x004001fc,
+ 0x00400174,
+ NV04_PGRAPH_DMA_START_0,
+ NV04_PGRAPH_DMA_START_1,
+ NV04_PGRAPH_DMA_LENGTH,
+ NV04_PGRAPH_DMA_MISC,
+ NV04_PGRAPH_DMA_PITCH,
+ NV04_PGRAPH_BOFFSET0,
+ NV04_PGRAPH_BBASE0,
+ NV04_PGRAPH_BLIMIT0,
+ NV04_PGRAPH_BOFFSET1,
+ NV04_PGRAPH_BBASE1,
+ NV04_PGRAPH_BLIMIT1,
+ NV04_PGRAPH_BOFFSET2,
+ NV04_PGRAPH_BBASE2,
+ NV04_PGRAPH_BLIMIT2,
+ NV04_PGRAPH_BOFFSET3,
+ NV04_PGRAPH_BBASE3,
+ NV04_PGRAPH_BLIMIT3,
+ NV04_PGRAPH_BOFFSET4,
+ NV04_PGRAPH_BBASE4,
+ NV04_PGRAPH_BLIMIT4,
+ NV04_PGRAPH_BOFFSET5,
+ NV04_PGRAPH_BBASE5,
+ NV04_PGRAPH_BLIMIT5,
+ NV04_PGRAPH_BPITCH0,
+ NV04_PGRAPH_BPITCH1,
+ NV04_PGRAPH_BPITCH2,
+ NV04_PGRAPH_BPITCH3,
+ NV04_PGRAPH_BPITCH4,
+ NV04_PGRAPH_SURFACE,
+ NV04_PGRAPH_STATE,
+ NV04_PGRAPH_BSWIZZLE2,
+ NV04_PGRAPH_BSWIZZLE5,
+ NV04_PGRAPH_BPIXEL,
+ NV04_PGRAPH_NOTIFY,
+ NV04_PGRAPH_PATT_COLOR0,
+ NV04_PGRAPH_PATT_COLOR1,
+ NV04_PGRAPH_PATT_COLORRAM+0x00,
+ NV04_PGRAPH_PATT_COLORRAM+0x04,
+ NV04_PGRAPH_PATT_COLORRAM+0x08,
+ NV04_PGRAPH_PATT_COLORRAM+0x0c,
+ NV04_PGRAPH_PATT_COLORRAM+0x10,
+ NV04_PGRAPH_PATT_COLORRAM+0x14,
+ NV04_PGRAPH_PATT_COLORRAM+0x18,
+ NV04_PGRAPH_PATT_COLORRAM+0x1c,
+ NV04_PGRAPH_PATT_COLORRAM+0x20,
+ NV04_PGRAPH_PATT_COLORRAM+0x24,
+ NV04_PGRAPH_PATT_COLORRAM+0x28,
+ NV04_PGRAPH_PATT_COLORRAM+0x2c,
+ NV04_PGRAPH_PATT_COLORRAM+0x30,
+ NV04_PGRAPH_PATT_COLORRAM+0x34,
+ NV04_PGRAPH_PATT_COLORRAM+0x38,
+ NV04_PGRAPH_PATT_COLORRAM+0x3c,
+ NV04_PGRAPH_PATT_COLORRAM+0x40,
+ NV04_PGRAPH_PATT_COLORRAM+0x44,
+ NV04_PGRAPH_PATT_COLORRAM+0x48,
+ NV04_PGRAPH_PATT_COLORRAM+0x4c,
+ NV04_PGRAPH_PATT_COLORRAM+0x50,
+ NV04_PGRAPH_PATT_COLORRAM+0x54,
+ NV04_PGRAPH_PATT_COLORRAM+0x58,
+ NV04_PGRAPH_PATT_COLORRAM+0x5c,
+ NV04_PGRAPH_PATT_COLORRAM+0x60,
+ NV04_PGRAPH_PATT_COLORRAM+0x64,
+ NV04_PGRAPH_PATT_COLORRAM+0x68,
+ NV04_PGRAPH_PATT_COLORRAM+0x6c,
+ NV04_PGRAPH_PATT_COLORRAM+0x70,
+ NV04_PGRAPH_PATT_COLORRAM+0x74,
+ NV04_PGRAPH_PATT_COLORRAM+0x78,
+ NV04_PGRAPH_PATT_COLORRAM+0x7c,
+ NV04_PGRAPH_PATT_COLORRAM+0x80,
+ NV04_PGRAPH_PATT_COLORRAM+0x84,
+ NV04_PGRAPH_PATT_COLORRAM+0x88,
+ NV04_PGRAPH_PATT_COLORRAM+0x8c,
+ NV04_PGRAPH_PATT_COLORRAM+0x90,
+ NV04_PGRAPH_PATT_COLORRAM+0x94,
+ NV04_PGRAPH_PATT_COLORRAM+0x98,
+ NV04_PGRAPH_PATT_COLORRAM+0x9c,
+ NV04_PGRAPH_PATT_COLORRAM+0xa0,
+ NV04_PGRAPH_PATT_COLORRAM+0xa4,
+ NV04_PGRAPH_PATT_COLORRAM+0xa8,
+ NV04_PGRAPH_PATT_COLORRAM+0xac,
+ NV04_PGRAPH_PATT_COLORRAM+0xb0,
+ NV04_PGRAPH_PATT_COLORRAM+0xb4,
+ NV04_PGRAPH_PATT_COLORRAM+0xb8,
+ NV04_PGRAPH_PATT_COLORRAM+0xbc,
+ NV04_PGRAPH_PATT_COLORRAM+0xc0,
+ NV04_PGRAPH_PATT_COLORRAM+0xc4,
+ NV04_PGRAPH_PATT_COLORRAM+0xc8,
+ NV04_PGRAPH_PATT_COLORRAM+0xcc,
+ NV04_PGRAPH_PATT_COLORRAM+0xd0,
+ NV04_PGRAPH_PATT_COLORRAM+0xd4,
+ NV04_PGRAPH_PATT_COLORRAM+0xd8,
+ NV04_PGRAPH_PATT_COLORRAM+0xdc,
+ NV04_PGRAPH_PATT_COLORRAM+0xe0,
+ NV04_PGRAPH_PATT_COLORRAM+0xe4,
+ NV04_PGRAPH_PATT_COLORRAM+0xe8,
+ NV04_PGRAPH_PATT_COLORRAM+0xec,
+ NV04_PGRAPH_PATT_COLORRAM+0xf0,
+ NV04_PGRAPH_PATT_COLORRAM+0xf4,
+ NV04_PGRAPH_PATT_COLORRAM+0xf8,
+ NV04_PGRAPH_PATT_COLORRAM+0xfc,
+ NV04_PGRAPH_PATTERN,
+ 0x0040080c,
+ NV04_PGRAPH_PATTERN_SHAPE,
+ 0x00400600,
+ NV04_PGRAPH_ROP3,
+ NV04_PGRAPH_CHROMA,
+ NV04_PGRAPH_BETA_AND,
+ NV04_PGRAPH_BETA_PREMULT,
+ NV04_PGRAPH_CONTROL0,
+ NV04_PGRAPH_CONTROL1,
+ NV04_PGRAPH_CONTROL2,
+ NV04_PGRAPH_BLEND,
+ NV04_PGRAPH_STORED_FMT,
+ NV04_PGRAPH_SOURCE_COLOR,
+ 0x00400560,
+ 0x00400568,
+ 0x00400564,
+ 0x0040056c,
+ 0x00400400,
+ 0x00400480,
+ 0x00400404,
+ 0x00400484,
+ 0x00400408,
+ 0x00400488,
+ 0x0040040c,
+ 0x0040048c,
+ 0x00400410,
+ 0x00400490,
+ 0x00400414,
+ 0x00400494,
+ 0x00400418,
+ 0x00400498,
+ 0x0040041c,
+ 0x0040049c,
+ 0x00400420,
+ 0x004004a0,
+ 0x00400424,
+ 0x004004a4,
+ 0x00400428,
+ 0x004004a8,
+ 0x0040042c,
+ 0x004004ac,
+ 0x00400430,
+ 0x004004b0,
+ 0x00400434,
+ 0x004004b4,
+ 0x00400438,
+ 0x004004b8,
+ 0x0040043c,
+ 0x004004bc,
+ 0x00400440,
+ 0x004004c0,
+ 0x00400444,
+ 0x004004c4,
+ 0x00400448,
+ 0x004004c8,
+ 0x0040044c,
+ 0x004004cc,
+ 0x00400450,
+ 0x004004d0,
+ 0x00400454,
+ 0x004004d4,
+ 0x00400458,
+ 0x004004d8,
+ 0x0040045c,
+ 0x004004dc,
+ 0x00400460,
+ 0x004004e0,
+ 0x00400464,
+ 0x004004e4,
+ 0x00400468,
+ 0x004004e8,
+ 0x0040046c,
+ 0x004004ec,
+ 0x00400470,
+ 0x004004f0,
+ 0x00400474,
+ 0x004004f4,
+ 0x00400478,
+ 0x004004f8,
+ 0x0040047c,
+ 0x004004fc,
+ 0x00400534,
+ 0x00400538,
+ 0x00400514,
+ 0x00400518,
+ 0x0040051c,
+ 0x00400520,
+ 0x00400524,
+ 0x00400528,
+ 0x0040052c,
+ 0x00400530,
+ 0x00400d00,
+ 0x00400d40,
+ 0x00400d80,
+ 0x00400d04,
+ 0x00400d44,
+ 0x00400d84,
+ 0x00400d08,
+ 0x00400d48,
+ 0x00400d88,
+ 0x00400d0c,
+ 0x00400d4c,
+ 0x00400d8c,
+ 0x00400d10,
+ 0x00400d50,
+ 0x00400d90,
+ 0x00400d14,
+ 0x00400d54,
+ 0x00400d94,
+ 0x00400d18,
+ 0x00400d58,
+ 0x00400d98,
+ 0x00400d1c,
+ 0x00400d5c,
+ 0x00400d9c,
+ 0x00400d20,
+ 0x00400d60,
+ 0x00400da0,
+ 0x00400d24,
+ 0x00400d64,
+ 0x00400da4,
+ 0x00400d28,
+ 0x00400d68,
+ 0x00400da8,
+ 0x00400d2c,
+ 0x00400d6c,
+ 0x00400dac,
+ 0x00400d30,
+ 0x00400d70,
+ 0x00400db0,
+ 0x00400d34,
+ 0x00400d74,
+ 0x00400db4,
+ 0x00400d38,
+ 0x00400d78,
+ 0x00400db8,
+ 0x00400d3c,
+ 0x00400d7c,
+ 0x00400dbc,
+ 0x00400590,
+ 0x00400594,
+ 0x00400598,
+ 0x0040059c,
+ 0x004005a8,
+ 0x004005ac,
+ 0x004005b0,
+ 0x004005b4,
+ 0x004005c0,
+ 0x004005c4,
+ 0x004005c8,
+ 0x004005cc,
+ 0x004005d0,
+ 0x004005d4,
+ 0x004005d8,
+ 0x004005dc,
+ 0x004005e0,
+ NV04_PGRAPH_PASSTHRU_0,
+ NV04_PGRAPH_PASSTHRU_1,
+ NV04_PGRAPH_PASSTHRU_2,
+ NV04_PGRAPH_DVD_COLORFMT,
+ NV04_PGRAPH_SCALED_FORMAT,
+ NV04_PGRAPH_MISC24_0,
+ NV04_PGRAPH_MISC24_1,
+ NV04_PGRAPH_MISC24_2,
+ 0x00400500,
+ 0x00400504,
+ NV04_PGRAPH_VALID1,
+ NV04_PGRAPH_VALID2,
+ NV04_PGRAPH_DEBUG_3
+};
+
+struct graph_state {
+ uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
+};
+
+static struct nouveau_channel *
+nv04_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chid = 15;
+
+ if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
+ chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
+
+ if (chid > 15)
+ return NULL;
+
+ return dev_priv->channels.ptr[chid];
+}
+
+static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
+ if (nv04_graph_ctx_regs[i] == reg)
+ return &ctx->nv04[i];
+ }
+
+ return NULL;
+}
+
+static int
+nv04_graph_load_context(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ uint32_t tmp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+ nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
+
+ nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
+
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
+
+ tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
+ nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
+
+ return 0;
+}
+
+static int
+nv04_graph_unload_context(struct drm_device *dev)
+{
+ struct nouveau_channel *chan = NULL;
+ struct graph_state *ctx;
+ uint32_t tmp;
+ int i;
+
+ chan = nv04_graph_channel(dev);
+ if (!chan)
+ return 0;
+ ctx = chan->engctx[NVOBJ_ENGINE_GR];
+
+ for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
+ ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
+
+ nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= 15 << 24;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
+ return 0;
+}
+
+static int
+nv04_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct graph_state *pgraph_ctx;
+ NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
+
+ pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
+ if (pgraph_ctx == NULL)
+ return -ENOMEM;
+
+ *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
+ chan->engctx[engine] = pgraph_ctx;
+ return 0;
+}
+
+static void
+nv04_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state *pgraph_ctx = chan->engctx[engine];
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+
+ /* Unload the context if it's the currently active one */
+ if (nv04_graph_channel(dev) == chan)
+ nv04_graph_unload_context(dev);
+
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
+ kfree(pgraph_ctx);
+ chan->engctx[engine] = NULL;
+}
+
+int
+nv04_graph_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 1;
+ obj->class = class;
+
+#ifdef __BIG_ENDIAN
+ nv_wo32(obj, 0x00, 0x00080000 | class);
+#else
+ nv_wo32(obj, 0x00, class);
+#endif
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static int
+nv04_graph_init(struct drm_device *dev, int engine)
+{
+ uint32_t tmp;
+
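+ /* reset PGRAPH by clearing and then setting its bit in PMC_ENABLE */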
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ /* Enable PGRAPH interrupts */
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
+ nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
+ /*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
+ /*1231C000 blob, 001 haiku*/
+ /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
+ /*0x72111100 blob , 01 haiku*/
+ /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
+ /*haiku same*/
+
+ /*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
+ /*haiku and blob 10d4*/
+
+ nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
+ tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= 15 << 24;
+ nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
+
+ /* These don't belong here, they're part of a per-channel context */
+ nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
+
+ return 0;
+}
+
+static int
+nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
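+ /* stop PFIFO from feeding PGRAPH and wait for the engine to idle;
+ * refuse to suspend while it is still busy.
+ */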
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ return -EBUSY;
+ }
+ nv04_graph_unload_context(dev);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ return 0;
+}
+
+/*
+ * Software methods, why they are needed, and how they all work:
+ *
+ * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
+ * 2d engine settings are kept inside the grobjs themselves. The grobjs are
+ * 3 words long on both. grobj format on NV04 is:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * On NV05 it's:
+ *
+ * word 0:
+ * - bits 0-7: class
+ * - bit 12: color key active
+ * - bit 13: clip rect active
+ * - bit 14: if set, destination surface is swizzled and taken from buffer 5
+ * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
+ * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
+ * NV03_CONTEXT_SURFACE_DST].
+ * - bits 15-17: 2d operation [aka patch config]
+ * - bits 20-22: dither mode
+ * - bit 24: patch valid [enables rendering using this object]
+ * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
+ * - bit 26: surface_src/surface_zeta valid
+ * - bit 27: pattern valid
+ * - bit 28: rop valid
+ * - bit 29: beta1 valid
+ * - bit 30: beta4 valid
+ * word 1:
+ * - bits 0-1: mono format
+ * - bits 8-13: color format
+ * - bits 16-31: DMA_NOTIFY instance
+ * word 2:
+ * - bits 0-15: DMA_A instance
+ * - bits 16-31: DMA_B instance
+ *
+ * NV05 will set/unset the relevant valid bits when you poke the relevant
+ * object-binding methods with object of the proper type, or with the NULL
+ * type. It'll only allow rendering using the grobj if all needed objects
+ * are bound. The needed set of objects depends on the selected operation: for
+ * example, a rop object is needed by ROP_AND, but not by SRCCOPY_AND.
+ *
+ * NV04 doesn't have these methods implemented at all, and doesn't have the
+ * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
+ * is set. So we have to emulate them in software, internally keeping the
+ * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04
+ * and the last word isn't actually used for anything, we abuse it for this
+ * purpose.
+ *
+ * Actually, NV05 can optionally check bit 24 too, but we disable this since
+ * there's no use for it.
+ *
+ * For unknown reasons, NV04 implements surf3d binding in hardware as an
+ * exception. Also for unknown reasons, NV04 doesn't implement the clipping
+ * methods on the surf3d object, so we have to emulate them too.
+ */
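A rough stand-alone sketch of the NV04 grobj word layout described in the comment above (not part of the patch; the struct and function names here are invented purely for illustration):

#include <stdint.h>

struct nv04_grobj_fields {
	unsigned class_id;      /* word 0, bits 0-7 */
	unsigned color_key;     /* word 0, bit 12 */
	unsigned clip_rect;     /* word 0, bit 13 */
	unsigned swizzled_dst;  /* word 0, bit 14 */
	unsigned operation;     /* word 0, bits 15-17 */
	unsigned patch_valid;   /* word 0, bit 24 */
	unsigned surf3d_valid;  /* word 0, bit 25 */
	unsigned mono_format;   /* word 1, bits 0-1 */
	unsigned color_format;  /* word 1, bits 8-13 */
	unsigned dma_notify;    /* word 1, bits 16-31 */
	unsigned dma_a;         /* word 2, bits 0-15 */
	unsigned dma_b;         /* word 2, bits 16-31 */
};

void nv04_grobj_decode(const uint32_t w[3], struct nv04_grobj_fields *f)
{
	/* pull each documented bitfield out of the three grobj words */
	f->class_id     =  w[0]        & 0xff;
	f->color_key    = (w[0] >> 12) & 0x1;
	f->clip_rect    = (w[0] >> 13) & 0x1;
	f->swizzled_dst = (w[0] >> 14) & 0x1;
	f->operation    = (w[0] >> 15) & 0x7;
	f->patch_valid  = (w[0] >> 24) & 0x1;
	f->surf3d_valid = (w[0] >> 25) & 0x1;
	f->mono_format  =  w[1]        & 0x3;
	f->color_format = (w[1] >>  8) & 0x3f;
	f->dma_notify   = (w[1] >> 16) & 0xffff;
	f->dma_a        =  w[2]        & 0xffff;
	f->dma_b        = (w[2] >> 16) & 0xffff;
}

On NV05 the same three words apply, with the dither mode in word 0 bits 20-22 and the additional valid bits listed above.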
+
+static void
+nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
+{
+ struct drm_device *dev = chan->dev;
+ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
+ u32 tmp;
+
+ tmp = nv_ri32(dev, instance);
+ tmp &= ~mask;
+ tmp |= value;
+
+ nv_wi32(dev, instance, tmp);
+ nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
+ nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
+}
+
+static void
+nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
+{
+ struct drm_device *dev = chan->dev;
+ u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
+ u32 tmp, ctx1;
+ int class, op, valid = 1;
+
+ ctx1 = nv_ri32(dev, instance);
+ class = ctx1 & 0xff;
+ op = (ctx1 >> 15) & 7;
+ tmp = nv_ri32(dev, instance + 0xc);
+ tmp &= ~mask;
+ tmp |= value;
+ nv_wi32(dev, instance + 0xc, tmp);
+
+ /* check for valid surf2d/surf_dst/surf_color */
+ if (!(tmp & 0x02000000))
+ valid = 0;
+ /* check for valid surf_src/surf_zeta */
+ if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
+ valid = 0;
+
+ switch (op) {
+ /* SRCCOPY_AND, SRCCOPY: no extra objects required */
+ case 0:
+ case 3:
+ break;
+ /* ROP_AND: requires pattern and rop */
+ case 1:
+ if (!(tmp & 0x18000000))
+ valid = 0;
+ break;
+ /* BLEND_AND: requires beta1 */
+ case 2:
+ if (!(tmp & 0x20000000))
+ valid = 0;
+ break;
+ /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
+ case 4:
+ case 5:
+ if (!(tmp & 0x40000000))
+ valid = 0;
+ break;
+ }
+
+ nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
+}
+
+static int
+nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ if (data > 5)
+ return 1;
+ /* Old versions of the objects only accept the first three operations. */
+ if (data > 2 && class < 0x40)
+ return 1;
+ nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
+ /* changing operation changes set of objects needed for validation */
+ nv04_graph_set_ctx_val(chan, 0, 0);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x40053c, min);
+ nv_wr32(chan->dev, 0x400544, max);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ uint32_t min = data & 0xffff, max;
+ uint32_t w = data >> 16;
+ if (min & 0x8000)
+ /* too large */
+ return 1;
+ if (w & 0x8000)
+ /* yes, it accepts negative for some reason. */
+ w |= 0xffff0000;
+ max = min + w;
+ max &= 0x3ffff;
+ nv_wr32(chan->dev, 0x400540, min);
+ nv_wr32(chan->dev, 0x400548, max);
+ return 0;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x42:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ case 0x52:
+ nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x18:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0);
+ return 0;
+ case 0x44:
+ nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0);
+ return 0;
+ case 0x43:
+ nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0);
+ return 0;
+ case 0x12:
+ nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0);
+ return 0;
+ case 0x72:
+ nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x58:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x59:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0);
+ return 0;
+ case 0x5a:
+ nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0);
+ return 0;
+ case 0x5b:
+ nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x2000, 0);
+ return 0;
+ case 0x19:
+ nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
+ return 0;
+ }
+ return 1;
+}
+
+static int
+nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ switch (nv_ri32(chan->dev, data << 4) & 0xff) {
+ case 0x30:
+ nv04_graph_set_ctx1(chan, 0x1000, 0);
+ return 0;
+ /* Yes, for some reason even the old versions of objects
+ * accept 0x57 and not 0x17. Consistency be damned.
+ */
+ case 0x57:
+ nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
+ return 0;
+ }
+ return 1;
+}
+
+static struct nouveau_bitfield nv04_graph_intr[] = {
+ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+ {}
+};
+
+static struct nouveau_bitfield nv04_graph_nstatus[] = {
+ { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
+ {}
+};
+
+struct nouveau_bitfield nv04_graph_nsource[] = {
+ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
+ {}
+};
+
+static void
+nv04_graph_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ int chid;
+
+ nouveau_wait_for_idle(dev);
+
+ /* If previous context is valid, we need to save it */
+ nv04_graph_unload_context(dev);
+
+ /* Load context for next channel */
+ chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
+ NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
+ chan = dev_priv->channels.ptr[chid];
+ if (chan)
+ nv04_graph_load_context(chan);
+}
+
+static void
+nv04_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x0f000000) >> 24;
+ u32 subc = (addr & 0x0000e000) >> 13;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_NOTIFY) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_NOTIFY;
+ }
+ }
+
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv04_graph_context_switch(dev);
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv04_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
+
+static void
+nv04_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nv04_graph_engine *pgraph = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 12);
+
+ NVOBJ_ENGINE_DEL(dev, GR);
+ kfree(pgraph);
+}
+
+int
+nv04_graph_create(struct drm_device *dev)
+{
+ struct nv04_graph_engine *pgraph;
+
+ pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+ if (!pgraph)
+ return -ENOMEM;
+
+ pgraph->base.destroy = nv04_graph_destroy;
+ pgraph->base.init = nv04_graph_init;
+ pgraph->base.fini = nv04_graph_fini;
+ pgraph->base.context_new = nv04_graph_context_new;
+ pgraph->base.context_del = nv04_graph_context_del;
+ pgraph->base.object_new = nv04_graph_object_new;
+
+ NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+ nouveau_irq_register(dev, 12, nv04_graph_isr);
+
+ /* dvd subpicture */
+ NVOBJ_CLASS(dev, 0x0038, GR);
+
+ /* m2mf */
+ NVOBJ_CLASS(dev, 0x0039, GR);
+
+ /* nv03 gdirect */
+ NVOBJ_CLASS(dev, 0x004b, GR);
+ NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 gdirect */
+ NVOBJ_CLASS(dev, 0x004a, GR);
+ NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 imageblit */
+ NVOBJ_CLASS(dev, 0x001f, GR);
+ NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
+ NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 imageblit */
+ NVOBJ_CLASS(dev, 0x005f, GR);
+ NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 iifc */
+ NVOBJ_CLASS(dev, 0x0060, GR);
+ NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
+ NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
+
+ /* nv05 iifc */
+ NVOBJ_CLASS(dev, 0x0064, GR);
+
+ /* nv01 ifc */
+ NVOBJ_CLASS(dev, 0x0021, GR);
+ NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 ifc */
+ NVOBJ_CLASS(dev, 0x0061, GR);
+ NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv05 ifc */
+ NVOBJ_CLASS(dev, 0x0065, GR);
+
+ /* nv03 sifc */
+ NVOBJ_CLASS(dev, 0x0036, GR);
+ NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 sifc */
+ NVOBJ_CLASS(dev, 0x0076, GR);
+ NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
+ NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv05 sifc */
+ NVOBJ_CLASS(dev, 0x0066, GR);
+
+ /* nv03 sifm */
+ NVOBJ_CLASS(dev, 0x0037, GR);
+ NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
+
+ /* nv04 sifm */
+ NVOBJ_CLASS(dev, 0x0077, GR);
+ NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
+ NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
+
+ /* null */
+ NVOBJ_CLASS(dev, 0x0030, GR);
+
+ /* surf2d */
+ NVOBJ_CLASS(dev, 0x0042, GR);
+
+ /* rop */
+ NVOBJ_CLASS(dev, 0x0043, GR);
+
+ /* beta1 */
+ NVOBJ_CLASS(dev, 0x0012, GR);
+
+ /* beta4 */
+ NVOBJ_CLASS(dev, 0x0072, GR);
+
+ /* cliprect */
+ NVOBJ_CLASS(dev, 0x0019, GR);
+
+ /* nv01 pattern */
+ NVOBJ_CLASS(dev, 0x0018, GR);
+
+ /* nv04 pattern */
+ NVOBJ_CLASS(dev, 0x0044, GR);
+
+ /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0052, GR);
+
+ /* surf3d */
+ NVOBJ_CLASS(dev, 0x0053, GR);
+ NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
+ NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
+
+ /* nv03 tex_tri */
+ NVOBJ_CLASS(dev, 0x0048, GR);
+ NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
+ NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
+
+ /* tex_tri */
+ NVOBJ_CLASS(dev, 0x0054, GR);
+
+ /* multitex_tri */
+ NVOBJ_CLASS(dev, 0x0055, GR);
+
+ /* nv01 chroma */
+ NVOBJ_CLASS(dev, 0x0017, GR);
+
+ /* nv04 chroma */
+ NVOBJ_CLASS(dev, 0x0057, GR);
+
+ /* surf_dst */
+ NVOBJ_CLASS(dev, 0x0058, GR);
+
+ /* surf_src */
+ NVOBJ_CLASS(dev, 0x0059, GR);
+
+ /* surf_color */
+ NVOBJ_CLASS(dev, 0x005a, GR);
+
+ /* surf_zeta */
+ NVOBJ_CLASS(dev, 0x005b, GR);
+
+ /* nv01 line */
+ NVOBJ_CLASS(dev, 0x001c, GR);
+ NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 line */
+ NVOBJ_CLASS(dev, 0x005c, GR);
+ NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 tri */
+ NVOBJ_CLASS(dev, 0x001d, GR);
+ NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 tri */
+ NVOBJ_CLASS(dev, 0x005d, GR);
+ NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv01 rect */
+ NVOBJ_CLASS(dev, 0x001e, GR);
+ NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
+ NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
+ NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
+
+ /* nv04 rect */
+ NVOBJ_CLASS(dev, 0x005e, GR);
+ NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
+ NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
+ NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
+ NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
+ NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
+ NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
+ NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+
+struct nv10_graph_engine {
+ struct nouveau_exec_engine base;
+};
+
+struct pipe_state {
+ uint32_t pipe_0x0000[0x040/4];
+ uint32_t pipe_0x0040[0x010/4];
+ uint32_t pipe_0x0200[0x0c0/4];
+ uint32_t pipe_0x4400[0x080/4];
+ uint32_t pipe_0x6400[0x3b0/4];
+ uint32_t pipe_0x6800[0x2f0/4];
+ uint32_t pipe_0x6c00[0x030/4];
+ uint32_t pipe_0x7000[0x130/4];
+ uint32_t pipe_0x7400[0x0c0/4];
+ uint32_t pipe_0x7800[0x0c0/4];
+};
+
+static int nv10_graph_ctx_regs[] = {
+ NV10_PGRAPH_CTX_SWITCH(0),
+ NV10_PGRAPH_CTX_SWITCH(1),
+ NV10_PGRAPH_CTX_SWITCH(2),
+ NV10_PGRAPH_CTX_SWITCH(3),
+ NV10_PGRAPH_CTX_SWITCH(4),
+ NV10_PGRAPH_CTX_CACHE(0, 0),
+ NV10_PGRAPH_CTX_CACHE(0, 1),
+ NV10_PGRAPH_CTX_CACHE(0, 2),
+ NV10_PGRAPH_CTX_CACHE(0, 3),
+ NV10_PGRAPH_CTX_CACHE(0, 4),
+ NV10_PGRAPH_CTX_CACHE(1, 0),
+ NV10_PGRAPH_CTX_CACHE(1, 1),
+ NV10_PGRAPH_CTX_CACHE(1, 2),
+ NV10_PGRAPH_CTX_CACHE(1, 3),
+ NV10_PGRAPH_CTX_CACHE(1, 4),
+ NV10_PGRAPH_CTX_CACHE(2, 0),
+ NV10_PGRAPH_CTX_CACHE(2, 1),
+ NV10_PGRAPH_CTX_CACHE(2, 2),
+ NV10_PGRAPH_CTX_CACHE(2, 3),
+ NV10_PGRAPH_CTX_CACHE(2, 4),
+ NV10_PGRAPH_CTX_CACHE(3, 0),
+ NV10_PGRAPH_CTX_CACHE(3, 1),
+ NV10_PGRAPH_CTX_CACHE(3, 2),
+ NV10_PGRAPH_CTX_CACHE(3, 3),
+ NV10_PGRAPH_CTX_CACHE(3, 4),
+ NV10_PGRAPH_CTX_CACHE(4, 0),
+ NV10_PGRAPH_CTX_CACHE(4, 1),
+ NV10_PGRAPH_CTX_CACHE(4, 2),
+ NV10_PGRAPH_CTX_CACHE(4, 3),
+ NV10_PGRAPH_CTX_CACHE(4, 4),
+ NV10_PGRAPH_CTX_CACHE(5, 0),
+ NV10_PGRAPH_CTX_CACHE(5, 1),
+ NV10_PGRAPH_CTX_CACHE(5, 2),
+ NV10_PGRAPH_CTX_CACHE(5, 3),
+ NV10_PGRAPH_CTX_CACHE(5, 4),
+ NV10_PGRAPH_CTX_CACHE(6, 0),
+ NV10_PGRAPH_CTX_CACHE(6, 1),
+ NV10_PGRAPH_CTX_CACHE(6, 2),
+ NV10_PGRAPH_CTX_CACHE(6, 3),
+ NV10_PGRAPH_CTX_CACHE(6, 4),
+ NV10_PGRAPH_CTX_CACHE(7, 0),
+ NV10_PGRAPH_CTX_CACHE(7, 1),
+ NV10_PGRAPH_CTX_CACHE(7, 2),
+ NV10_PGRAPH_CTX_CACHE(7, 3),
+ NV10_PGRAPH_CTX_CACHE(7, 4),
+ NV10_PGRAPH_CTX_USER,
+ NV04_PGRAPH_DMA_START_0,
+ NV04_PGRAPH_DMA_START_1,
+ NV04_PGRAPH_DMA_LENGTH,
+ NV04_PGRAPH_DMA_MISC,
+ NV10_PGRAPH_DMA_PITCH,
+ NV04_PGRAPH_BOFFSET0,
+ NV04_PGRAPH_BBASE0,
+ NV04_PGRAPH_BLIMIT0,
+ NV04_PGRAPH_BOFFSET1,
+ NV04_PGRAPH_BBASE1,
+ NV04_PGRAPH_BLIMIT1,
+ NV04_PGRAPH_BOFFSET2,
+ NV04_PGRAPH_BBASE2,
+ NV04_PGRAPH_BLIMIT2,
+ NV04_PGRAPH_BOFFSET3,
+ NV04_PGRAPH_BBASE3,
+ NV04_PGRAPH_BLIMIT3,
+ NV04_PGRAPH_BOFFSET4,
+ NV04_PGRAPH_BBASE4,
+ NV04_PGRAPH_BLIMIT4,
+ NV04_PGRAPH_BOFFSET5,
+ NV04_PGRAPH_BBASE5,
+ NV04_PGRAPH_BLIMIT5,
+ NV04_PGRAPH_BPITCH0,
+ NV04_PGRAPH_BPITCH1,
+ NV04_PGRAPH_BPITCH2,
+ NV04_PGRAPH_BPITCH3,
+ NV04_PGRAPH_BPITCH4,
+ NV10_PGRAPH_SURFACE,
+ NV10_PGRAPH_STATE,
+ NV04_PGRAPH_BSWIZZLE2,
+ NV04_PGRAPH_BSWIZZLE5,
+ NV04_PGRAPH_BPIXEL,
+ NV10_PGRAPH_NOTIFY,
+ NV04_PGRAPH_PATT_COLOR0,
+ NV04_PGRAPH_PATT_COLOR1,
+ NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
+ 0x00400904,
+ 0x00400908,
+ 0x0040090c,
+ 0x00400910,
+ 0x00400914,
+ 0x00400918,
+ 0x0040091c,
+ 0x00400920,
+ 0x00400924,
+ 0x00400928,
+ 0x0040092c,
+ 0x00400930,
+ 0x00400934,
+ 0x00400938,
+ 0x0040093c,
+ 0x00400940,
+ 0x00400944,
+ 0x00400948,
+ 0x0040094c,
+ 0x00400950,
+ 0x00400954,
+ 0x00400958,
+ 0x0040095c,
+ 0x00400960,
+ 0x00400964,
+ 0x00400968,
+ 0x0040096c,
+ 0x00400970,
+ 0x00400974,
+ 0x00400978,
+ 0x0040097c,
+ 0x00400980,
+ 0x00400984,
+ 0x00400988,
+ 0x0040098c,
+ 0x00400990,
+ 0x00400994,
+ 0x00400998,
+ 0x0040099c,
+ 0x004009a0,
+ 0x004009a4,
+ 0x004009a8,
+ 0x004009ac,
+ 0x004009b0,
+ 0x004009b4,
+ 0x004009b8,
+ 0x004009bc,
+ 0x004009c0,
+ 0x004009c4,
+ 0x004009c8,
+ 0x004009cc,
+ 0x004009d0,
+ 0x004009d4,
+ 0x004009d8,
+ 0x004009dc,
+ 0x004009e0,
+ 0x004009e4,
+ 0x004009e8,
+ 0x004009ec,
+ 0x004009f0,
+ 0x004009f4,
+ 0x004009f8,
+ 0x004009fc,
+ NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
+ 0x0040080c,
+ NV04_PGRAPH_PATTERN_SHAPE,
+ NV03_PGRAPH_MONO_COLOR0,
+ NV04_PGRAPH_ROP3,
+ NV04_PGRAPH_CHROMA,
+ NV04_PGRAPH_BETA_AND,
+ NV04_PGRAPH_BETA_PREMULT,
+ 0x00400e70,
+ 0x00400e74,
+ 0x00400e78,
+ 0x00400e7c,
+ 0x00400e80,
+ 0x00400e84,
+ 0x00400e88,
+ 0x00400e8c,
+ 0x00400ea0,
+ 0x00400ea4,
+ 0x00400ea8,
+ 0x00400e90,
+ 0x00400e94,
+ 0x00400e98,
+ 0x00400e9c,
+ NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
+ NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */
+ 0x00400f04,
+ 0x00400f24,
+ 0x00400f08,
+ 0x00400f28,
+ 0x00400f0c,
+ 0x00400f2c,
+ 0x00400f10,
+ 0x00400f30,
+ 0x00400f14,
+ 0x00400f34,
+ 0x00400f18,
+ 0x00400f38,
+ 0x00400f1c,
+ 0x00400f3c,
+ NV10_PGRAPH_XFMODE0,
+ NV10_PGRAPH_XFMODE1,
+ NV10_PGRAPH_GLOBALSTATE0,
+ NV10_PGRAPH_GLOBALSTATE1,
+ NV04_PGRAPH_STORED_FMT,
+ NV04_PGRAPH_SOURCE_COLOR,
+ NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
+ NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
+ 0x00400404,
+ 0x00400484,
+ 0x00400408,
+ 0x00400488,
+ 0x0040040c,
+ 0x0040048c,
+ 0x00400410,
+ 0x00400490,
+ 0x00400414,
+ 0x00400494,
+ 0x00400418,
+ 0x00400498,
+ 0x0040041c,
+ 0x0040049c,
+ 0x00400420,
+ 0x004004a0,
+ 0x00400424,
+ 0x004004a4,
+ 0x00400428,
+ 0x004004a8,
+ 0x0040042c,
+ 0x004004ac,
+ 0x00400430,
+ 0x004004b0,
+ 0x00400434,
+ 0x004004b4,
+ 0x00400438,
+ 0x004004b8,
+ 0x0040043c,
+ 0x004004bc,
+ 0x00400440,
+ 0x004004c0,
+ 0x00400444,
+ 0x004004c4,
+ 0x00400448,
+ 0x004004c8,
+ 0x0040044c,
+ 0x004004cc,
+ 0x00400450,
+ 0x004004d0,
+ 0x00400454,
+ 0x004004d4,
+ 0x00400458,
+ 0x004004d8,
+ 0x0040045c,
+ 0x004004dc,
+ 0x00400460,
+ 0x004004e0,
+ 0x00400464,
+ 0x004004e4,
+ 0x00400468,
+ 0x004004e8,
+ 0x0040046c,
+ 0x004004ec,
+ 0x00400470,
+ 0x004004f0,
+ 0x00400474,
+ 0x004004f4,
+ 0x00400478,
+ 0x004004f8,
+ 0x0040047c,
+ 0x004004fc,
+ NV03_PGRAPH_ABS_UCLIP_XMIN,
+ NV03_PGRAPH_ABS_UCLIP_XMAX,
+ NV03_PGRAPH_ABS_UCLIP_YMIN,
+ NV03_PGRAPH_ABS_UCLIP_YMAX,
+ 0x00400550,
+ 0x00400558,
+ 0x00400554,
+ 0x0040055c,
+ NV03_PGRAPH_ABS_UCLIPA_XMIN,
+ NV03_PGRAPH_ABS_UCLIPA_XMAX,
+ NV03_PGRAPH_ABS_UCLIPA_YMIN,
+ NV03_PGRAPH_ABS_UCLIPA_YMAX,
+ NV03_PGRAPH_ABS_ICLIP_XMAX,
+ NV03_PGRAPH_ABS_ICLIP_YMAX,
+ NV03_PGRAPH_XY_LOGIC_MISC0,
+ NV03_PGRAPH_XY_LOGIC_MISC1,
+ NV03_PGRAPH_XY_LOGIC_MISC2,
+ NV03_PGRAPH_XY_LOGIC_MISC3,
+ NV03_PGRAPH_CLIPX_0,
+ NV03_PGRAPH_CLIPX_1,
+ NV03_PGRAPH_CLIPY_0,
+ NV03_PGRAPH_CLIPY_1,
+ NV10_PGRAPH_COMBINER0_IN_ALPHA,
+ NV10_PGRAPH_COMBINER1_IN_ALPHA,
+ NV10_PGRAPH_COMBINER0_IN_RGB,
+ NV10_PGRAPH_COMBINER1_IN_RGB,
+ NV10_PGRAPH_COMBINER_COLOR0,
+ NV10_PGRAPH_COMBINER_COLOR1,
+ NV10_PGRAPH_COMBINER0_OUT_ALPHA,
+ NV10_PGRAPH_COMBINER1_OUT_ALPHA,
+ NV10_PGRAPH_COMBINER0_OUT_RGB,
+ NV10_PGRAPH_COMBINER1_OUT_RGB,
+ NV10_PGRAPH_COMBINER_FINAL0,
+ NV10_PGRAPH_COMBINER_FINAL1,
+ 0x00400e00,
+ 0x00400e04,
+ 0x00400e08,
+ 0x00400e0c,
+ 0x00400e10,
+ 0x00400e14,
+ 0x00400e18,
+ 0x00400e1c,
+ 0x00400e20,
+ 0x00400e24,
+ 0x00400e28,
+ 0x00400e2c,
+ 0x00400e30,
+ 0x00400e34,
+ 0x00400e38,
+ 0x00400e3c,
+ NV04_PGRAPH_PASSTHRU_0,
+ NV04_PGRAPH_PASSTHRU_1,
+ NV04_PGRAPH_PASSTHRU_2,
+ NV10_PGRAPH_DIMX_TEXTURE,
+ NV10_PGRAPH_WDIMX_TEXTURE,
+ NV10_PGRAPH_DVD_COLORFMT,
+ NV10_PGRAPH_SCALED_FORMAT,
+ NV04_PGRAPH_MISC24_0,
+ NV04_PGRAPH_MISC24_1,
+ NV04_PGRAPH_MISC24_2,
+ NV03_PGRAPH_X_MISC,
+ NV03_PGRAPH_Y_MISC,
+ NV04_PGRAPH_VALID1,
+ NV04_PGRAPH_VALID2,
+};
+
+static int nv17_graph_ctx_regs[] = {
+ NV10_PGRAPH_DEBUG_4,
+ 0x004006b0,
+ 0x00400eac,
+ 0x00400eb0,
+ 0x00400eb4,
+ 0x00400eb8,
+ 0x00400ebc,
+ 0x00400ec0,
+ 0x00400ec4,
+ 0x00400ec8,
+ 0x00400ecc,
+ 0x00400ed0,
+ 0x00400ed4,
+ 0x00400ed8,
+ 0x00400edc,
+ 0x00400ee0,
+ 0x00400a00,
+ 0x00400a04,
+};
+
+struct graph_state {
+ int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
+ int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
+ struct pipe_state pipe_state;
+ uint32_t lma_window[4];
+};
+
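+/* PIPE_ADDRESS selects an offset into PGRAPH's internal pipeline state;
+ * PIPE_DATA then accesses successive words from that offset, so each
+ * save/restore is a single address write followed by a loop of data
+ * reads or writes.
+ */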
+#define PIPE_SAVE(dev, state, addr) \
+ do { \
+ int __i; \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
+ state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
+ } while (0)
+
+#define PIPE_RESTORE(dev, state, addr) \
+ do { \
+ int __i; \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
+ } while (0)
+
+static void nv10_graph_save_pipe(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
+ struct pipe_state *pipe = &pgraph_ctx->pipe_state;
+ struct drm_device *dev = chan->dev;
+
+ PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
+ PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
+ PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
+ PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
+ PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
+ PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
+ PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
+ PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
+}
+
+static void nv10_graph_load_pipe(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
+ struct pipe_state *pipe = &pgraph_ctx->pipe_state;
+ struct drm_device *dev = chan->dev;
+ uint32_t xfmode0, xfmode1;
+ int i;
+
+ nouveau_wait_for_idle(dev);
+ /* XXX check haiku comments */
+ xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+ PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
+ nouveau_wait_for_idle(dev);
+
+ /* restore XFMODE */
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
+ PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
+ PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
+ PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
+ PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
+ PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
+ PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
+ PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
+ PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
+ nouveau_wait_for_idle(dev);
+}
+
+static void nv10_graph_create_pipe(struct nouveau_channel *chan)
+{
+ struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
+ struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
+ struct drm_device *dev = chan->dev;
+ uint32_t *fifo_pipe_state_addr;
+ int i;
+#define PIPE_INIT(addr) \
+ do { \
+ fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
+ } while (0)
+#define PIPE_INIT_END(addr) \
+ do { \
+ uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
+ ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
+ if (fifo_pipe_state_addr != __end_addr) \
+ NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \
+ addr, fifo_pipe_state_addr, __end_addr); \
+ } while (0)
+#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
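+/* Each PIPE_INIT/NV_WRITE_PIPE_INIT/PIPE_INIT_END sequence fills the shadow
+ * copy of one pipe range with its default values and checks that exactly
+ * the expected number of words was written.
+ */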
+
+ PIPE_INIT(0x0200);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0200);
+
+ PIPE_INIT(0x6400);
+ for (i = 0; i < 211; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ PIPE_INIT_END(0x6400);
+
+ PIPE_INIT(0x6800);
+ for (i = 0; i < 162; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ for (i = 0; i < 25; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6800);
+
+ PIPE_INIT(0x6c00);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0xbf800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6c00);
+
+ PIPE_INIT(0x7000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ for (i = 0; i < 35; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7000);
+
+ PIPE_INIT(0x7400);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7400);
+
+ PIPE_INIT(0x7800);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7800);
+
+ PIPE_INIT(0x4400);
+ for (i = 0; i < 32; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x4400);
+
+ PIPE_INIT(0x0000);
+ for (i = 0; i < 16; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0000);
+
+ PIPE_INIT(0x0040);
+ for (i = 0; i < 4; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0040);
+
+#undef PIPE_INIT
+#undef PIPE_INIT_END
+#undef NV_WRITE_PIPE_INIT
+}
+
+static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
+ if (nv10_graph_ctx_regs[i] == reg)
+ return i;
+ }
+ NV_ERROR(dev, "unknown offset nv10_ctx_regs %d\n", reg);
+ return -1;
+}
+
+static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
+ if (nv17_graph_ctx_regs[i] == reg)
+ return i;
+ }
+ NV_ERROR(dev, "unknown offset nv17_ctx_regs %d\n", reg);
+ return -1;
+}
+
+static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
+ uint32_t inst)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
+ uint32_t ctx_user, ctx_switch[5];
+ int i, subchan = -1;
+
+ /* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
+ * that cannot be restored via MMIO. Do it through the FIFO
+ * instead.
+ */
+
+ /* Look for a celsius object */
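+ /* 0x56, 0x96 and 0x99 are the celsius classes registered per-chipset in
+ * nv10_graph_create().
+ */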
+ for (i = 0; i < 8; i++) {
+ int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
+
+ if (class == 0x56 || class == 0x96 || class == 0x99) {
+ subchan = i;
+ break;
+ }
+ }
+
+ if (subchan < 0 || !inst)
+ return;
+
+ /* Save the current ctx object */
+ ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
+ for (i = 0; i < 5; i++)
+ ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));
+
+ /* Save the FIFO state */
+ st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
+ st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
+ st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
+ fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);
+
+ for (i = 0; i < ARRAY_SIZE(fifo); i++)
+ fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);
+
+ /* Switch to the celsius subchannel */
+ for (i = 0; i < 5; i++)
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
+ nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
+ nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
+
+ /* Inject NV10TCL_DMA_VTXBUF */
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
+ 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
+ nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+
+ /* Restore the FIFO state */
+ for (i = 0; i < ARRAY_SIZE(fifo); i++)
+ nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);
+
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
+
+ /* Restore the current ctx object */
+ for (i = 0; i < 5; i++)
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
+}
+
+static int
+nv10_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
+ uint32_t tmp;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+ nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
+ if (dev_priv->chipset >= 0x17) {
+ for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+ nv_wr32(dev, nv17_graph_ctx_regs[i],
+ pgraph_ctx->nv17[i]);
+ }
+
+ nv10_graph_load_pipe(chan);
+ nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
+ & 0xffff));
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
+ tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
+ return 0;
+}
+
+static int
+nv10_graph_unload_context(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ struct graph_state *ctx;
+ uint32_t tmp;
+ int i;
+
+ chan = nv10_graph_channel(dev);
+ if (!chan)
+ return 0;
+ ctx = chan->engctx[NVOBJ_ENGINE_GR];
+
+ for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
+ ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
+
+ if (dev_priv->chipset >= 0x17) {
+ for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
+ ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
+ }
+
+ nv10_graph_save_pipe(chan);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= 31 << 24;
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ return 0;
+}
+
+static void
+nv10_graph_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = NULL;
+ int chid;
+
+ nouveau_wait_for_idle(dev);
+
+ /* If previous context is valid, we need to save it */
+ nv10_graph_unload_context(dev);
+
+ /* Load context for next channel */
+ chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+ chan = dev_priv->channels.ptr[chid];
+ if (chan && chan->engctx[NVOBJ_ENGINE_GR])
+ nv10_graph_load_context(chan);
+}
+
+#define NV_WRITE_CTX(reg, val) do { \
+ int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
+ if (offset > 0) \
+ pgraph_ctx->nv10[offset] = val; \
+ } while (0)
+
+#define NV17_WRITE_CTX(reg, val) do { \
+ int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
+ if (offset > 0) \
+ pgraph_ctx->nv17[offset] = val; \
+ } while (0)
+
+struct nouveau_channel *
+nv10_graph_channel(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chid = 31;
+
+ if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
+ chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
+
+ if (chid >= 31)
+ return NULL;
+
+ return dev_priv->channels.ptr[chid];
+}
+
+static int
+nv10_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state *pgraph_ctx;
+
+ NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
+
+ pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
+ if (pgraph_ctx == NULL)
+ return -ENOMEM;
+ chan->engctx[engine] = pgraph_ctx;
+
+ NV_WRITE_CTX(0x00400e88, 0x08000000);
+ NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
+ NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
+ NV_WRITE_CTX(0x00400e10, 0x00001000);
+ NV_WRITE_CTX(0x00400e14, 0x00001000);
+ NV_WRITE_CTX(0x00400e30, 0x00080008);
+ NV_WRITE_CTX(0x00400e34, 0x00080008);
+ if (dev_priv->chipset >= 0x17) {
+ /* is it really needed ??? */
+ NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
+ nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
+ NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
+ NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
+ NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
+ NV17_WRITE_CTX(0x00400ec0, 0x00000080);
+ NV17_WRITE_CTX(0x00400ed0, 0x00000080);
+ }
+ NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
+
+ nv10_graph_create_pipe(chan);
+ return 0;
+}
+
+static void
+nv10_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct graph_state *pgraph_ctx = chan->engctx[engine];
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+
+ /* Unload the context if it's the currently active one */
+ if (nv10_graph_channel(dev) == chan)
+ nv10_graph_unload_context(dev);
+
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
+ chan->engctx[engine] = NULL;
+ kfree(pgraph_ctx);
+}
+
+static void
+nv10_graph_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
+}
+
+static int
+nv10_graph_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 tmp;
+ int i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ /* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
+ (1<<29) |
+ (1<<31));
+ if (dev_priv->chipset >= 0x17) {
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
+ nv_wr32(dev, 0x400a10, 0x3ff3fb6);
+ nv_wr32(dev, 0x400838, 0x2f8684);
+ nv_wr32(dev, 0x40083c, 0x115f3f);
+ nv_wr32(dev, 0x004006b0, 0x40000020);
+ } else
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv10_graph_set_tile_region(dev, i);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
+
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= 31 << 24;
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
+
+ return 0;
+}
+
+static int
+nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ return -EBUSY;
+ }
+ nv10_graph_unload_context(dev);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ return 0;
+}
+
+static int
+nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ struct pipe_state *pipe = &ctx->pipe_state;
+ uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
+ uint32_t xfmode0, xfmode1;
+ int i;
+
+ ctx->lma_window[(mthd - 0x1638) / 4] = data;
+
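+ /* methods 0x1638-0x1644 each supply one LMA window dword; only apply the
+ * window once the final dword (0x1644) has arrived.
+ */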
+ if (mthd != 0x1644)
+ return 0;
+
+ nouveau_wait_for_idle(dev);
+
+ PIPE_SAVE(dev, pipe_0x0040, 0x0040);
+ PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
+
+ PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
+
+ nouveau_wait_for_idle(dev);
+
+ xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
+ xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
+
+ PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
+ PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
+ PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
+ PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
+ for (i = 0; i < 3; i++)
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
+
+ PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
+
+ nouveau_wait_for_idle(dev);
+
+ PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
+
+ nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
+ nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
+
+ PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
+ PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
+ PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
+ PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
+
+ nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
+ nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
+
+ nouveau_wait_for_idle(dev);
+
+ return 0;
+}
+
+static int
+nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
+ u32 class, u32 mthd, u32 data)
+{
+ struct drm_device *dev = chan->dev;
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
+ nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
+ nv_wr32(dev, 0x004006b0,
+ nv_rd32(dev, 0x004006b0) | 0x8 << 24);
+
+ return 0;
+}
+
+struct nouveau_bitfield nv10_graph_intr[] = {
+ { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
+ { NV_PGRAPH_INTR_ERROR, "ERROR" },
+ {}
+};
+
+struct nouveau_bitfield nv10_graph_nstatus[] = {
+ { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
+ {}
+};
+
+static void
+nv10_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ }
+ }
+
+ if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
+ nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
+ stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
+ nv10_graph_context_switch(dev);
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
+
+static void
+nv10_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nv10_graph_engine *pgraph = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 12);
+ kfree(pgraph);
+}
+
+int
+nv10_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv10_graph_engine *pgraph;
+
+ pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+ if (!pgraph)
+ return -ENOMEM;
+
+ pgraph->base.destroy = nv10_graph_destroy;
+ pgraph->base.init = nv10_graph_init;
+ pgraph->base.fini = nv10_graph_fini;
+ pgraph->base.context_new = nv10_graph_context_new;
+ pgraph->base.context_del = nv10_graph_context_del;
+ pgraph->base.object_new = nv04_graph_object_new;
+ pgraph->base.set_tile_region = nv10_graph_set_tile_region;
+
+ NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+ nouveau_irq_register(dev, 12, nv10_graph_isr);
+
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
+ NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
+ NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
+
+ /* celsius */
+ if (dev_priv->chipset <= 0x10) {
+ NVOBJ_CLASS(dev, 0x0056, GR);
+ } else
+ if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
+ NVOBJ_CLASS(dev, 0x0096, GR);
+ } else {
+ NVOBJ_CLASS(dev, 0x0099, GR);
+ NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
+ NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
+ }
+
+ return 0;
+}
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+/*
+ * NV20
+ * -----
+ * There are 3 families :
+ * NV20 is 0x10de:0x020*
+ * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
+ * NV2A is 0x10de:0x02A0
+ *
+ * NV30
+ * -----
+ * There are 3 families :
+ * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
+ * NV34 is 0x10de:0x032*
+ * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
+ *
+ * Not seen in the wild, no dumps (probably NV35) :
+ * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
+ * NV38 is 0x10de:0x0333, 0x10de:0x00fe
+ *
+ */
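The device-id table above maps directly onto the per-family context-init routines defined below. As a rough illustration only (nv2x_3x_family() is a hypothetical helper invented for this note; the driver itself selects a family from dev_priv->chipset, not from the PCI id):

static const char *
nv2x_3x_family(unsigned device)
{
	/* id ranges taken from the comment above */
	switch (device & 0xfff0) {
	case 0x0200:              return "NV20";
	case 0x0250: case 0x0280: return "NV25/28";
	case 0x02a0:              return "NV2A";
	case 0x0300: case 0x0310: return "NV30/31";
	case 0x0320:              return "NV34";
	case 0x0330: case 0x0340: return "NV35/36";
	default:                  return "unknown";
	}
}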
+
+struct nv20_graph_engine {
+ struct nouveau_exec_engine base;
+ struct nouveau_gpuobj *ctxtab;
+ void (*grctx_init)(struct nouveau_gpuobj *);
+ u32 grctx_size;
+ u32 grctx_user;
+};
+
+#define NV20_GRCTX_SIZE (3580*4)
+#define NV25_GRCTX_SIZE (3529*4)
+#define NV2A_GRCTX_SIZE (3500*4)
+
+#define NV30_31_GRCTX_SIZE (24392)
+#define NV34_GRCTX_SIZE (18140)
+#define NV35_36_GRCTX_SIZE (22396)
+
+int
+nv20_graph_unload_context(struct drm_device *dev)
+{
+ struct nouveau_channel *chan;
+ struct nouveau_gpuobj *grctx;
+ u32 tmp;
+
+ chan = nv10_graph_channel(dev);
+ if (!chan)
+ return 0;
+ grctx = chan->engctx[NVOBJ_ENGINE_GR];
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
+ NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
+
+ nouveau_wait_for_idle(dev);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
+ tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
+ tmp |= 31 << 24;
+ nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
+ return 0;
+}
+
+static void
+nv20_graph_rdi(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, writecount = 32;
+ uint32_t rdi_index = 0x2c80000;
+
+ if (dev_priv->chipset == 0x20) {
+ rdi_index = 0x3d0000;
+ writecount = 15;
+ }
+
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
+ for (i = 0; i < writecount; i++)
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
+
+ nouveau_wait_for_idle(dev);
+}
+
+static void
+nv20_graph_context_init(struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(ctx, 0x033c, 0xffff0000);
+ nv_wo32(ctx, 0x03a0, 0x0fff0000);
+ nv_wo32(ctx, 0x03a4, 0x0fff0000);
+ nv_wo32(ctx, 0x047c, 0x00000101);
+ nv_wo32(ctx, 0x0490, 0x00000111);
+ nv_wo32(ctx, 0x04a8, 0x44400000);
+ for (i = 0x04d4; i <= 0x04e0; i += 4)
+ nv_wo32(ctx, i, 0x00030303);
+ for (i = 0x04f4; i <= 0x0500; i += 4)
+ nv_wo32(ctx, i, 0x00080000);
+ for (i = 0x050c; i <= 0x0518; i += 4)
+ nv_wo32(ctx, i, 0x01012000);
+ for (i = 0x051c; i <= 0x0528; i += 4)
+ nv_wo32(ctx, i, 0x000105b8);
+ for (i = 0x052c; i <= 0x0538; i += 4)
+ nv_wo32(ctx, i, 0x00080008);
+ for (i = 0x055c; i <= 0x0598; i += 4)
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x05a4, 0x4b7fffff);
+ nv_wo32(ctx, 0x05fc, 0x00000001);
+ nv_wo32(ctx, 0x0604, 0x00004000);
+ nv_wo32(ctx, 0x0610, 0x00000001);
+ nv_wo32(ctx, 0x0618, 0x00040000);
+ nv_wo32(ctx, 0x061c, 0x00010000);
+ for (i = 0x1c1c; i <= 0x248c; i += 16) {
+ nv_wo32(ctx, (i + 0), 0x10700ff9);
+ nv_wo32(ctx, (i + 4), 0x0436086c);
+ nv_wo32(ctx, (i + 8), 0x000c001b);
+ }
+ nv_wo32(ctx, 0x281c, 0x3f800000);
+ nv_wo32(ctx, 0x2830, 0x3f800000);
+ nv_wo32(ctx, 0x285c, 0x40000000);
+ nv_wo32(ctx, 0x2860, 0x3f800000);
+ nv_wo32(ctx, 0x2864, 0x3f000000);
+ nv_wo32(ctx, 0x286c, 0x40000000);
+ nv_wo32(ctx, 0x2870, 0x3f800000);
+ nv_wo32(ctx, 0x2878, 0xbf800000);
+ nv_wo32(ctx, 0x2880, 0xbf800000);
+ nv_wo32(ctx, 0x34a4, 0x000fe000);
+ nv_wo32(ctx, 0x3530, 0x000003f8);
+ nv_wo32(ctx, 0x3540, 0x002fe000);
+ for (i = 0x355c; i <= 0x3578; i += 4)
+ nv_wo32(ctx, i, 0x001c527c);
+}
+
+static void
+nv25_graph_context_init(struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(ctx, 0x035c, 0xffff0000);
+ nv_wo32(ctx, 0x03c0, 0x0fff0000);
+ nv_wo32(ctx, 0x03c4, 0x0fff0000);
+ nv_wo32(ctx, 0x049c, 0x00000101);
+ nv_wo32(ctx, 0x04b0, 0x00000111);
+ nv_wo32(ctx, 0x04c8, 0x00000080);
+ nv_wo32(ctx, 0x04cc, 0xffff0000);
+ nv_wo32(ctx, 0x04d0, 0x00000001);
+ nv_wo32(ctx, 0x04e4, 0x44400000);
+ nv_wo32(ctx, 0x04fc, 0x4b800000);
+ for (i = 0x0510; i <= 0x051c; i += 4)
+ nv_wo32(ctx, i, 0x00030303);
+ for (i = 0x0530; i <= 0x053c; i += 4)
+ nv_wo32(ctx, i, 0x00080000);
+ for (i = 0x0548; i <= 0x0554; i += 4)
+ nv_wo32(ctx, i, 0x01012000);
+ for (i = 0x0558; i <= 0x0564; i += 4)
+ nv_wo32(ctx, i, 0x000105b8);
+ for (i = 0x0568; i <= 0x0574; i += 4)
+ nv_wo32(ctx, i, 0x00080008);
+ for (i = 0x0598; i <= 0x05d4; i += 4)
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x05e0, 0x4b7fffff);
+ nv_wo32(ctx, 0x0620, 0x00000080);
+ nv_wo32(ctx, 0x0624, 0x30201000);
+ nv_wo32(ctx, 0x0628, 0x70605040);
+ nv_wo32(ctx, 0x062c, 0xb0a09080);
+ nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
+ nv_wo32(ctx, 0x0664, 0x00000001);
+ nv_wo32(ctx, 0x066c, 0x00004000);
+ nv_wo32(ctx, 0x0678, 0x00000001);
+ nv_wo32(ctx, 0x0680, 0x00040000);
+ nv_wo32(ctx, 0x0684, 0x00010000);
+ for (i = 0x1b04; i <= 0x2374; i += 16) {
+ nv_wo32(ctx, (i + 0), 0x10700ff9);
+ nv_wo32(ctx, (i + 4), 0x0436086c);
+ nv_wo32(ctx, (i + 8), 0x000c001b);
+ }
+ nv_wo32(ctx, 0x2704, 0x3f800000);
+ nv_wo32(ctx, 0x2718, 0x3f800000);
+ nv_wo32(ctx, 0x2744, 0x40000000);
+ nv_wo32(ctx, 0x2748, 0x3f800000);
+ nv_wo32(ctx, 0x274c, 0x3f000000);
+ nv_wo32(ctx, 0x2754, 0x40000000);
+ nv_wo32(ctx, 0x2758, 0x3f800000);
+ nv_wo32(ctx, 0x2760, 0xbf800000);
+ nv_wo32(ctx, 0x2768, 0xbf800000);
+ nv_wo32(ctx, 0x308c, 0x000fe000);
+ nv_wo32(ctx, 0x3108, 0x000003f8);
+ nv_wo32(ctx, 0x3468, 0x002fe000);
+ for (i = 0x3484; i <= 0x34a0; i += 4)
+ nv_wo32(ctx, i, 0x001c527c);
+}
+
+static void
+nv2a_graph_context_init(struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(ctx, 0x033c, 0xffff0000);
+ nv_wo32(ctx, 0x03a0, 0x0fff0000);
+ nv_wo32(ctx, 0x03a4, 0x0fff0000);
+ nv_wo32(ctx, 0x047c, 0x00000101);
+ nv_wo32(ctx, 0x0490, 0x00000111);
+ nv_wo32(ctx, 0x04a8, 0x44400000);
+ for (i = 0x04d4; i <= 0x04e0; i += 4)
+ nv_wo32(ctx, i, 0x00030303);
+ for (i = 0x04f4; i <= 0x0500; i += 4)
+ nv_wo32(ctx, i, 0x00080000);
+ for (i = 0x050c; i <= 0x0518; i += 4)
+ nv_wo32(ctx, i, 0x01012000);
+ for (i = 0x051c; i <= 0x0528; i += 4)
+ nv_wo32(ctx, i, 0x000105b8);
+ for (i = 0x052c; i <= 0x0538; i += 4)
+ nv_wo32(ctx, i, 0x00080008);
+ for (i = 0x055c; i <= 0x0598; i += 4)
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x05a4, 0x4b7fffff);
+ nv_wo32(ctx, 0x05fc, 0x00000001);
+ nv_wo32(ctx, 0x0604, 0x00004000);
+ nv_wo32(ctx, 0x0610, 0x00000001);
+ nv_wo32(ctx, 0x0618, 0x00040000);
+ nv_wo32(ctx, 0x061c, 0x00010000);
+ for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
+ nv_wo32(ctx, (i + 0), 0x10700ff9);
+ nv_wo32(ctx, (i + 4), 0x0436086c);
+ nv_wo32(ctx, (i + 8), 0x000c001b);
+ }
+ nv_wo32(ctx, 0x269c, 0x3f800000);
+ nv_wo32(ctx, 0x26b0, 0x3f800000);
+ nv_wo32(ctx, 0x26dc, 0x40000000);
+ nv_wo32(ctx, 0x26e0, 0x3f800000);
+ nv_wo32(ctx, 0x26e4, 0x3f000000);
+ nv_wo32(ctx, 0x26ec, 0x40000000);
+ nv_wo32(ctx, 0x26f0, 0x3f800000);
+ nv_wo32(ctx, 0x26f8, 0xbf800000);
+ nv_wo32(ctx, 0x2700, 0xbf800000);
+ nv_wo32(ctx, 0x3024, 0x000fe000);
+ nv_wo32(ctx, 0x30a0, 0x000003f8);
+ nv_wo32(ctx, 0x33fc, 0x002fe000);
+ for (i = 0x341c; i <= 0x3438; i += 4)
+ nv_wo32(ctx, i, 0x001c527c);
+}
+
+static void
+nv30_31_graph_context_init(struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(ctx, 0x0410, 0x00000101);
+ nv_wo32(ctx, 0x0424, 0x00000111);
+ nv_wo32(ctx, 0x0428, 0x00000060);
+ nv_wo32(ctx, 0x0444, 0x00000080);
+ nv_wo32(ctx, 0x0448, 0xffff0000);
+ nv_wo32(ctx, 0x044c, 0x00000001);
+ nv_wo32(ctx, 0x0460, 0x44400000);
+ nv_wo32(ctx, 0x048c, 0xffff0000);
+ for (i = 0x04e0; i < 0x04e8; i += 4)
+ nv_wo32(ctx, i, 0x0fff0000);
+ nv_wo32(ctx, 0x04ec, 0x00011100);
+ for (i = 0x0508; i < 0x0548; i += 4)
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x0550, 0x4b7fffff);
+ nv_wo32(ctx, 0x058c, 0x00000080);
+ nv_wo32(ctx, 0x0590, 0x30201000);
+ nv_wo32(ctx, 0x0594, 0x70605040);
+ nv_wo32(ctx, 0x0598, 0xb8a89888);
+ nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
+ nv_wo32(ctx, 0x05b0, 0xb0000000);
+ for (i = 0x0600; i < 0x0640; i += 4)
+ nv_wo32(ctx, i, 0x00010588);
+ for (i = 0x0640; i < 0x0680; i += 4)
+ nv_wo32(ctx, i, 0x00030303);
+ for (i = 0x06c0; i < 0x0700; i += 4)
+ nv_wo32(ctx, i, 0x0008aae4);
+ for (i = 0x0700; i < 0x0740; i += 4)
+ nv_wo32(ctx, i, 0x01012000);
+ for (i = 0x0740; i < 0x0780; i += 4)
+ nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(ctx, 0x085c, 0x00040000);
+ nv_wo32(ctx, 0x0860, 0x00010000);
+ for (i = 0x0864; i < 0x0874; i += 4)
+ nv_wo32(ctx, i, 0x00040004);
+ for (i = 0x1f18; i <= 0x3088 ; i += 16) {
+ nv_wo32(ctx, i + 0, 0x10700ff9);
+ nv_wo32(ctx, i + 4, 0x0436086c);
+ nv_wo32(ctx, i + 8, 0x000c001b);
+ }
+ for (i = 0x30b8; i < 0x30c8; i += 4)
+ nv_wo32(ctx, i, 0x0000ffff);
+ nv_wo32(ctx, 0x344c, 0x3f800000);
+ nv_wo32(ctx, 0x3808, 0x3f800000);
+ nv_wo32(ctx, 0x381c, 0x3f800000);
+ nv_wo32(ctx, 0x3848, 0x40000000);
+ nv_wo32(ctx, 0x384c, 0x3f800000);
+ nv_wo32(ctx, 0x3850, 0x3f000000);
+ nv_wo32(ctx, 0x3858, 0x40000000);
+ nv_wo32(ctx, 0x385c, 0x3f800000);
+ nv_wo32(ctx, 0x3864, 0xbf800000);
+ nv_wo32(ctx, 0x386c, 0xbf800000);
+}
+
+static void
+nv34_graph_context_init(struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(ctx, 0x040c, 0x01000101);
+ nv_wo32(ctx, 0x0420, 0x00000111);
+ nv_wo32(ctx, 0x0424, 0x00000060);
+ nv_wo32(ctx, 0x0440, 0x00000080);
+ nv_wo32(ctx, 0x0444, 0xffff0000);
+ nv_wo32(ctx, 0x0448, 0x00000001);
+ nv_wo32(ctx, 0x045c, 0x44400000);
+ nv_wo32(ctx, 0x0480, 0xffff0000);
+ for (i = 0x04d4; i < 0x04dc; i += 4)
+ nv_wo32(ctx, i, 0x0fff0000);
+ nv_wo32(ctx, 0x04e0, 0x00011100);
+ for (i = 0x04fc; i < 0x053c; i += 4)
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x0544, 0x4b7fffff);
+ nv_wo32(ctx, 0x057c, 0x00000080);
+ nv_wo32(ctx, 0x0580, 0x30201000);
+ nv_wo32(ctx, 0x0584, 0x70605040);
+ nv_wo32(ctx, 0x0588, 0xb8a89888);
+ nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
+ nv_wo32(ctx, 0x05a0, 0xb0000000);
+ for (i = 0x05f0; i < 0x0630; i += 4)
+ nv_wo32(ctx, i, 0x00010588);
+ for (i = 0x0630; i < 0x0670; i += 4)
+ nv_wo32(ctx, i, 0x00030303);
+ for (i = 0x06b0; i < 0x06f0; i += 4)
+ nv_wo32(ctx, i, 0x0008aae4);
+ for (i = 0x06f0; i < 0x0730; i += 4)
+ nv_wo32(ctx, i, 0x01012000);
+ for (i = 0x0730; i < 0x0770; i += 4)
+ nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(ctx, 0x0850, 0x00040000);
+ nv_wo32(ctx, 0x0854, 0x00010000);
+ for (i = 0x0858; i < 0x0868; i += 4)
+ nv_wo32(ctx, i, 0x00040004);
+ for (i = 0x15ac; i <= 0x271c ; i += 16) {
+ nv_wo32(ctx, i + 0, 0x10700ff9);
+ nv_wo32(ctx, i + 4, 0x0436086c);
+ nv_wo32(ctx, i + 8, 0x000c001b);
+ }
+ for (i = 0x274c; i < 0x275c; i += 4)
+ nv_wo32(ctx, i, 0x0000ffff);
+ nv_wo32(ctx, 0x2ae0, 0x3f800000);
+ nv_wo32(ctx, 0x2e9c, 0x3f800000);
+ nv_wo32(ctx, 0x2eb0, 0x3f800000);
+ nv_wo32(ctx, 0x2edc, 0x40000000);
+ nv_wo32(ctx, 0x2ee0, 0x3f800000);
+ nv_wo32(ctx, 0x2ee4, 0x3f000000);
+ nv_wo32(ctx, 0x2eec, 0x40000000);
+ nv_wo32(ctx, 0x2ef0, 0x3f800000);
+ nv_wo32(ctx, 0x2ef8, 0xbf800000);
+ nv_wo32(ctx, 0x2f00, 0xbf800000);
+}
+
+static void
+nv35_36_graph_context_init(struct nouveau_gpuobj *ctx)
+{
+ int i;
+
+ nv_wo32(ctx, 0x040c, 0x00000101);
+ nv_wo32(ctx, 0x0420, 0x00000111);
+ nv_wo32(ctx, 0x0424, 0x00000060);
+ nv_wo32(ctx, 0x0440, 0x00000080);
+ nv_wo32(ctx, 0x0444, 0xffff0000);
+ nv_wo32(ctx, 0x0448, 0x00000001);
+ nv_wo32(ctx, 0x045c, 0x44400000);
+ nv_wo32(ctx, 0x0488, 0xffff0000);
+ for (i = 0x04dc; i < 0x04e4; i += 4)
+ nv_wo32(ctx, i, 0x0fff0000);
+ nv_wo32(ctx, 0x04e8, 0x00011100);
+ for (i = 0x0504; i < 0x0544; i += 4)
+ nv_wo32(ctx, i, 0x07ff0000);
+ nv_wo32(ctx, 0x054c, 0x4b7fffff);
+ nv_wo32(ctx, 0x0588, 0x00000080);
+ nv_wo32(ctx, 0x058c, 0x30201000);
+ nv_wo32(ctx, 0x0590, 0x70605040);
+ nv_wo32(ctx, 0x0594, 0xb8a89888);
+ nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
+ nv_wo32(ctx, 0x05ac, 0xb0000000);
+ for (i = 0x0604; i < 0x0644; i += 4)
+ nv_wo32(ctx, i, 0x00010588);
+ for (i = 0x0644; i < 0x0684; i += 4)
+ nv_wo32(ctx, i, 0x00030303);
+ for (i = 0x06c4; i < 0x0704; i += 4)
+ nv_wo32(ctx, i, 0x0008aae4);
+ for (i = 0x0704; i < 0x0744; i += 4)
+ nv_wo32(ctx, i, 0x01012000);
+ for (i = 0x0744; i < 0x0784; i += 4)
+ nv_wo32(ctx, i, 0x00080008);
+ nv_wo32(ctx, 0x0860, 0x00040000);
+ nv_wo32(ctx, 0x0864, 0x00010000);
+ for (i = 0x0868; i < 0x0878; i += 4)
+ nv_wo32(ctx, i, 0x00040004);
+ for (i = 0x1f1c; i <= 0x308c ; i += 16) {
+ nv_wo32(ctx, i + 0, 0x10700ff9);
+ nv_wo32(ctx, i + 4, 0x0436086c);
+ nv_wo32(ctx, i + 8, 0x000c001b);
+ }
+ for (i = 0x30bc; i < 0x30cc; i += 4)
+ nv_wo32(ctx, i, 0x0000ffff);
+ nv_wo32(ctx, 0x3450, 0x3f800000);
+ nv_wo32(ctx, 0x380c, 0x3f800000);
+ nv_wo32(ctx, 0x3820, 0x3f800000);
+ nv_wo32(ctx, 0x384c, 0x40000000);
+ nv_wo32(ctx, 0x3850, 0x3f800000);
+ nv_wo32(ctx, 0x3854, 0x3f000000);
+ nv_wo32(ctx, 0x385c, 0x40000000);
+ nv_wo32(ctx, 0x3860, 0x3f800000);
+ nv_wo32(ctx, 0x3868, 0xbf800000);
+ nv_wo32(ctx, 0x3870, 0xbf800000);
+}
+
+int
+nv20_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
+ struct nouveau_gpuobj *grctx = NULL;
+ struct drm_device *dev = chan->dev;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &grctx);
+ if (ret)
+ return ret;
+
+ /* Initialise default context values */
+ pgraph->grctx_init(grctx);
+
+ /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
+ /* CTX_USER */
+ nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
+
+ nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4);
+ chan->engctx[engine] = grctx;
+ return 0;
+}
+
+void
+nv20_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
+ struct nouveau_gpuobj *grctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+
+ /* Unload the context if it's the currently active one */
+ if (nv10_graph_channel(dev) == chan)
+ nv20_graph_unload_context(dev);
+
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
+ nv_wo32(pgraph->ctxtab, chan->id * 4, 0);
+
+ nouveau_gpuobj_ref(NULL, &grctx);
+ chan->engctx[engine] = NULL;
+}
+
+static void
+nv20_graph_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
+
+ if (dev_priv->card_type == NV_20) {
+ nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
+ }
+}
+
+int
+nv20_graph_init(struct drm_device *dev, int engine)
+{
+ struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t tmp, vramsz;
+ int i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
+
+ nv20_graph_rdi(dev);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
+ nv_wr32(dev, 0x40009C , 0x00000040);
+
+ if (dev_priv->chipset >= 0x25) {
+ nv_wr32(dev, 0x400890, 0x00a8cfff);
+ nv_wr32(dev, 0x400610, 0x304B1FB6);
+ nv_wr32(dev, 0x400B80, 0x1cbd3883);
+ nv_wr32(dev, 0x400B84, 0x44000000);
+ nv_wr32(dev, 0x400098, 0x40000080);
+ nv_wr32(dev, 0x400B88, 0x000000ff);
+
+ } else {
+ nv_wr32(dev, 0x400880, 0x0008c7df);
+ nv_wr32(dev, 0x400094, 0x00000005);
+ nv_wr32(dev, 0x400B80, 0x45eae20e);
+ nv_wr32(dev, 0x400B84, 0x24000000);
+ nv_wr32(dev, 0x400098, 0x00000040);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
+ }
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv20_graph_set_tile_region(dev, i);
+
+ nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+
+ tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
+ nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
+ tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
+ nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
+
+ /* begin RAM config */
+ vramsz = pci_resource_len(dev->pdev, 0) - 1;
+ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x400820, 0);
+ nv_wr32(dev, 0x400824, 0);
+ nv_wr32(dev, 0x400864, vramsz - 1);
+ nv_wr32(dev, 0x400868, vramsz - 1);
+
+ /* interesting.. the below overwrites some of the tile setup above.. */
+ nv_wr32(dev, 0x400B20, 0x00000000);
+ nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
+ nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+
+ return 0;
+}
+
+int
+nv30_graph_init(struct drm_device *dev, int engine)
+{
+ struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE,
+ nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
+
+ nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nv_wr32(dev, 0x400890, 0x01b463ff);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
+ nv_wr32(dev, 0x400B80, 0x1003d888);
+ nv_wr32(dev, 0x400B84, 0x0c000000);
+ nv_wr32(dev, 0x400098, 0x00000000);
+ nv_wr32(dev, 0x40009C, 0x0005ad00);
+ nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
+ nv_wr32(dev, 0x4000a0, 0x00000000);
+ nv_wr32(dev, 0x4000a4, 0x00000008);
+ nv_wr32(dev, 0x4008a8, 0xb784a400);
+ nv_wr32(dev, 0x400ba0, 0x002f8685);
+ nv_wr32(dev, 0x400ba4, 0x00231f3f);
+ nv_wr32(dev, 0x4008a4, 0x40000020);
+
+ if (dev_priv->chipset == 0x34) {
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
+ nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
+ nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
+ }
+
+ nv_wr32(dev, 0x4000c0, 0x00000016);
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+ nv20_graph_set_tile_region(dev, i);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+ nv_wr32(dev, 0x0040075c , 0x00000001);
+
+ /* begin RAM config */
+ /* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
+ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+ if (dev_priv->chipset != 0x34) {
+ nv_wr32(dev, 0x400750, 0x00EA0000);
+ nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x400750, 0x00EA0004);
+ nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
+ }
+
+ return 0;
+}
+
+int
+nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
+ if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
+ nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
+ return -EBUSY;
+ }
+ nv20_graph_unload_context(dev);
+ nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
+ return 0;
+}
+
+static void
+nv20_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 chid = (addr & 0x01f00000) >> 20;
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ }
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, subc, class, mthd, data);
+ }
+ }
+}
+
+static void
+nv20_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 12);
+ nouveau_gpuobj_ref(NULL, &pgraph->ctxtab);
+
+ NVOBJ_ENGINE_DEL(dev, GR);
+ kfree(pgraph);
+}
+
+int
+nv20_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv20_graph_engine *pgraph;
+ int ret;
+
+ pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+ if (!pgraph)
+ return -ENOMEM;
+
+ pgraph->base.destroy = nv20_graph_destroy;
+ pgraph->base.fini = nv20_graph_fini;
+ pgraph->base.context_new = nv20_graph_context_new;
+ pgraph->base.context_del = nv20_graph_context_del;
+ pgraph->base.object_new = nv04_graph_object_new;
+ pgraph->base.set_tile_region = nv20_graph_set_tile_region;
+
+ pgraph->grctx_user = 0x0028;
+ if (dev_priv->card_type == NV_20) {
+ pgraph->base.init = nv20_graph_init;
+ switch (dev_priv->chipset) {
+ case 0x20:
+ pgraph->grctx_init = nv20_graph_context_init;
+ pgraph->grctx_size = NV20_GRCTX_SIZE;
+ pgraph->grctx_user = 0x0000;
+ break;
+ case 0x25:
+ case 0x28:
+ pgraph->grctx_init = nv25_graph_context_init;
+ pgraph->grctx_size = NV25_GRCTX_SIZE;
+ break;
+ case 0x2a:
+ pgraph->grctx_init = nv2a_graph_context_init;
+ pgraph->grctx_size = NV2A_GRCTX_SIZE;
+ pgraph->grctx_user = 0x0000;
+ break;
+ default:
+ NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+ kfree(pgraph);
+ return 0;
+ }
+ } else {
+ pgraph->base.init = nv30_graph_init;
+ switch (dev_priv->chipset) {
+ case 0x30:
+ case 0x31:
+ pgraph->grctx_init = nv30_31_graph_context_init;
+ pgraph->grctx_size = NV30_31_GRCTX_SIZE;
+ break;
+ case 0x34:
+ pgraph->grctx_init = nv34_graph_context_init;
+ pgraph->grctx_size = NV34_GRCTX_SIZE;
+ break;
+ case 0x35:
+ case 0x36:
+ pgraph->grctx_init = nv35_36_graph_context_init;
+ pgraph->grctx_size = NV35_36_GRCTX_SIZE;
+ break;
+ default:
+ NV_ERROR(dev, "PGRAPH: unknown chipset\n");
+ kfree(pgraph);
+ return 0;
+ }
+ }
+
+ /* Create Context Pointer Table */
+ ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC,
+ &pgraph->ctxtab);
+ if (ret) {
+ kfree(pgraph);
+ return ret;
+ }
+
+ NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+ nouveau_irq_register(dev, 12, nv20_graph_isr);
+
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ if (dev_priv->card_type == NV_20) {
+ NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
+ NVOBJ_CLASS(dev, 0x0096, GR); /* celsius */
+
+ /* kelvin */
+ if (dev_priv->chipset < 0x25)
+ NVOBJ_CLASS(dev, 0x0097, GR);
+ else
+ NVOBJ_CLASS(dev, 0x0597, GR);
+ } else {
+ NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
+ NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
+ NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
+ NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
+
+ /* rankine */
+ if (0x00000003 & (1 << (dev_priv->chipset & 0x0f))) /* 0x30, 0x31 */
+ NVOBJ_CLASS(dev, 0x0397, GR);
+ else
+ if (0x00000010 & (1 << (dev_priv->chipset & 0x0f))) /* 0x34 */
+ NVOBJ_CLASS(dev, 0x0697, GR);
+ else
+ if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f))) /* 0x35 - 0x38 */
+ NVOBJ_CLASS(dev, 0x0497, GR);
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
+
+struct nv40_graph_engine {
+ struct nouveau_exec_engine base;
+ u32 grctx_size;
+};
+
+static int
+nv40_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *grctx = NULL;
+ unsigned long flags;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC, &grctx);
+ if (ret)
+ return ret;
+
+ /* Initialise default context values */
+ nv40_grctx_fill(dev, grctx);
+ nv_wo32(grctx, 0, grctx->vinst);
+
+ /* init grctx pointer in ramfc, and on PFIFO if channel is
+ * already active there
+ */
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+ if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
+ nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ chan->engctx[engine] = grctx;
+ return 0;
+}
+
+static void
+nv40_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nouveau_gpuobj *grctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 inst = 0x01000000 | (grctx->pinst >> 4);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
+ if (nv_rd32(dev, 0x40032c) == inst)
+ nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
+ if (nv_rd32(dev, 0x400330) == inst)
+ nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
+ nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ /* Free the context resources */
+ nouveau_gpuobj_ref(NULL, &grctx);
+ chan->engctx[engine] = NULL;
+}
+
+int
+nv40_graph_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 1;
+ obj->class = class;
+
+ nv_wo32(obj, 0x00, class);
+ nv_wo32(obj, 0x04, 0x00000000);
+#ifndef __BIG_ENDIAN
+ nv_wo32(obj, 0x08, 0x00000000);
+#else
+ nv_wo32(obj, 0x08, 0x01000000);
+#endif
+ nv_wo32(obj, 0x0c, 0x00000000);
+ nv_wo32(obj, 0x10, 0x00000000);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static void
+nv40_graph_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x41: /* guess */
+ case 0x42:
+ case 0x43:
+ case 0x45: /* guess */
+ case 0x4e:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+ break;
+ case 0x44:
+ case 0x4a:
+ nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
+ break;
+ case 0x46:
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ case 0x4c:
+ case 0x67:
+ default:
+ nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
+ nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
+ nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
+ nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
+ break;
+ }
+}
+
+/*
+ * G70 0x47
+ * G71 0x49
+ * NV45 0x48
+ * G72[M] 0x46
+ * G73 0x4b
+ * C51_G7X 0x4c
+ * C51 0x4e
+ */
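The codename table above covers the dev_priv->chipset values that the chipset switches in this file key off. Purely as an illustrative lookup (nv4x_codename() is a hypothetical helper, not part of the driver):

static const char *
nv4x_codename(unsigned chipset)
{
	/* values copied from the comment above */
	switch (chipset) {
	case 0x46: return "G72[M]";
	case 0x47: return "G70";
	case 0x48: return "NV45";
	case 0x49: return "G71";
	case 0x4b: return "G73";
	case 0x4c: return "C51_G7X";
	case 0x4e: return "C51";
	default:   return "other NV4x";
	}
}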
+int
+nv40_graph_init(struct drm_device *dev, int engine)
+{
+ struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ uint32_t vramsz;
+ int i, j;
+
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
+ ~NV_PMC_ENABLE_PGRAPH);
+ nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
+ NV_PMC_ENABLE_PGRAPH);
+
+ /* generate and upload context program */
+ nv40_grctx_init(dev, &pgraph->grctx_size);
+
+ /* No context present currently */
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
+
+ nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
+ nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
+ nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
+ nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
+
+ nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
+ nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
+
+ j = nv_rd32(dev, 0x1540) & 0xff;
+ if (j) {
+ /* program the index of the lowest set bit of 0x1540's low byte
+ * (i.e. ffs(j) - 1) */
+ for (i = 0; !(j & 1); j >>= 1, i++)
+ ;
+ nv_wr32(dev, 0x405000, i);
+ }
+
+ if (dev_priv->chipset == 0x40) {
+ nv_wr32(dev, 0x4009b0, 0x83280fff);
+ nv_wr32(dev, 0x4009b4, 0x000000a0);
+ } else {
+ nv_wr32(dev, 0x400820, 0x83280eff);
+ nv_wr32(dev, 0x400824, 0x000000a0);
+ }
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x45:
+ nv_wr32(dev, 0x4009b8, 0x0078e366);
+ nv_wr32(dev, 0x4009bc, 0x0000014c);
+ break;
+ case 0x41:
+ case 0x42: /* pciid also 0x00Cx */
+ /* case 0x0120: XXX (pciid) */
+ nv_wr32(dev, 0x400828, 0x007596ff);
+ nv_wr32(dev, 0x40082c, 0x00000108);
+ break;
+ case 0x43:
+ nv_wr32(dev, 0x400828, 0x0072cb77);
+ nv_wr32(dev, 0x40082c, 0x00000108);
+ break;
+ case 0x44:
+ case 0x46: /* G72 */
+ case 0x4a:
+ case 0x4c: /* G7x-based C51 */
+ case 0x4e:
+ nv_wr32(dev, 0x400860, 0);
+ nv_wr32(dev, 0x400864, 0);
+ break;
+ case 0x47: /* G70 */
+ case 0x49: /* G71 */
+ case 0x4b: /* G73 */
+ nv_wr32(dev, 0x400828, 0x07830610);
+ nv_wr32(dev, 0x40082c, 0x0000016A);
+ break;
+ default:
+ break;
+ }
+
+ nv_wr32(dev, 0x400b38, 0x2ffff800);
+ nv_wr32(dev, 0x400b3c, 0x00006000);
+
+ /* Tiling related stuff. */
+ switch (dev_priv->chipset) {
+ case 0x44:
+ case 0x4a:
+ nv_wr32(dev, 0x400bc4, 0x1003d888);
+ nv_wr32(dev, 0x400bbc, 0xb7a7b500);
+ break;
+ case 0x46:
+ nv_wr32(dev, 0x400bc4, 0x0000e024);
+ nv_wr32(dev, 0x400bbc, 0xb7a7b520);
+ break;
+ case 0x4c:
+ case 0x4e:
+ case 0x67:
+ nv_wr32(dev, 0x400bc4, 0x1003d888);
+ nv_wr32(dev, 0x400bbc, 0xb7a7b540);
+ break;
+ default:
+ break;
+ }
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ nv40_graph_set_tile_region(dev, i);
+
+ /* begin RAM config */
+ vramsz = pci_resource_len(dev->pdev, 0) - 1;
+ switch (dev_priv->chipset) {
+ case 0x40:
+ nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x400820, 0);
+ nv_wr32(dev, 0x400824, 0);
+ nv_wr32(dev, 0x400864, vramsz);
+ nv_wr32(dev, 0x400868, vramsz);
+ break;
+ default:
+ switch (dev_priv->chipset) {
+ case 0x41:
+ case 0x42:
+ case 0x43:
+ case 0x45:
+ case 0x4e:
+ case 0x44:
+ case 0x4a:
+ nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
+ default:
+ nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
+ break;
+ }
+ nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
+ nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
+ nv_wr32(dev, 0x400840, 0);
+ nv_wr32(dev, 0x400844, 0);
+ nv_wr32(dev, 0x4008A0, vramsz);
+ nv_wr32(dev, 0x4008A4, vramsz);
+ break;
+ }
+
+ return 0;
+}
+
+static int
+nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ u32 inst = nv_rd32(dev, 0x40032c);
+ if (inst & 0x01000000) {
+ nv_wr32(dev, 0x400720, 0x00000000);
+ nv_wr32(dev, 0x400784, inst);
+ nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
+ nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
+ if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
+ u32 insn = nv_rd32(dev, 0x400308);
+ NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
+ }
+ nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
+ }
+ return 0;
+}
+
+static int
+nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *grctx;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < pfifo->channels; i++) {
+ if (!dev_priv->channels.ptr[i])
+ continue;
+ grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
+
+ if (grctx && grctx->pinst == inst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nv40_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
+ u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
+ u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
+ u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
+ u32 chid = nv40_graph_isr_chid(dev, inst);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
+ u32 show = stat;
+
+ if (stat & NV_PGRAPH_INTR_ERROR) {
+ if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
+ show &= ~NV_PGRAPH_INTR_ERROR;
+ } else
+ if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
+ nv_mask(dev, 0x402000, 0, 0);
+ }
+ }
+
+ nv_wr32(dev, NV03_PGRAPH_INTR, stat);
+ nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv10_graph_intr, show);
+ printk(" nsource:");
+ nouveau_bitfield_print(nv04_graph_nsource, nsource);
+ printk(" nstatus:");
+ nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ }
+}
+
+static void
+nv40_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 12);
+
+ NVOBJ_ENGINE_DEL(dev, GR);
+ kfree(pgraph);
+}
+
+int
+nv40_graph_create(struct drm_device *dev)
+{
+ struct nv40_graph_engine *pgraph;
+
+ pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+ if (!pgraph)
+ return -ENOMEM;
+
+ pgraph->base.destroy = nv40_graph_destroy;
+ pgraph->base.init = nv40_graph_init;
+ pgraph->base.fini = nv40_graph_fini;
+ pgraph->base.context_new = nv40_graph_context_new;
+ pgraph->base.context_del = nv40_graph_context_del;
+ pgraph->base.object_new = nv40_graph_object_new;
+ pgraph->base.set_tile_region = nv40_graph_set_tile_region;
+
+ NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+ nouveau_irq_register(dev, 12, nv40_graph_isr);
+
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
+ NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
+ NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
+ NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
+ NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
+ NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
+ NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
+ NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
+ NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
+ NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
+ NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
+ NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
+ NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
+
+ /* curie */
+ if (nv44_graph_class(dev))
+ NVOBJ_CLASS(dev, 0x4497, GR);
+ else
+ NVOBJ_CLASS(dev, 0x4097, GR);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
+#include "nouveau_dma.h"
+#include <subdev/vm.h>
+#include "nv50_evo.h"
+
+struct nv50_graph_engine {
+ struct nouveau_exec_engine base;
+ u32 ctxprog[512];
+ u32 ctxprog_size;
+ u32 grctx_size;
+};
+
+static int
+nv50_graph_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
+ u32 units = nv_rd32(dev, 0x001540);
+ int i;
+
+ NV_DEBUG(dev, "\n");
+
+ /* master reset */
+ nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
+ nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
+
+ /* reset/enable traps and interrupts */
+ nv_wr32(dev, 0x400804, 0xc0000000);
+ nv_wr32(dev, 0x406800, 0xc0000000);
+ nv_wr32(dev, 0x400c04, 0xc0000000);
+ nv_wr32(dev, 0x401800, 0xc0000000);
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+ for (i = 0; i < 16; i++) {
+ if (!(units & (1 << i)))
+ continue;
+
+ if (dev_priv->chipset < 0xa0) {
+ nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
+ } else {
+ nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
+ }
+ }
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
+ nv_wr32(dev, 0x400500, 0x00010001);
+
+ /* upload context program, initialise ctxctl defaults */
+ nv_wr32(dev, 0x400324, 0x00000000);
+ for (i = 0; i < pgraph->ctxprog_size; i++)
+ nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
+ nv_wr32(dev, 0x400824, 0x00000000);
+ nv_wr32(dev, 0x400828, 0x00000000);
+ nv_wr32(dev, 0x40082c, 0x00000000);
+ nv_wr32(dev, 0x400830, 0x00000000);
+ nv_wr32(dev, 0x400724, 0x00000000);
+ nv_wr32(dev, 0x40032c, 0x00000000);
+ nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
+
+ /* some unknown zcull magic */
+ switch (dev_priv->chipset & 0xf0) {
+ case 0x50:
+ case 0x80:
+ case 0x90:
+ nv_wr32(dev, 0x402ca8, 0x00000800);
+ break;
+ case 0xa0:
+ default:
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ if (dev_priv->chipset == 0xa0 ||
+ dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac) {
+ nv_wr32(dev, 0x402ca8, 0x00000802);
+ } else {
+ nv_wr32(dev, 0x402cc0, 0x00000000);
+ nv_wr32(dev, 0x402ca8, 0x00000002);
+ }
+
+ break;
+ }
+
+ /* zero out zcull regions */
+ for (i = 0; i < 8; i++) {
+ nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
+ nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
+ }
+
+ return 0;
+}
+
+static int
+nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ nv_wr32(dev, 0x40013c, 0x00000000);
+ return 0;
+}
+
+static int
+nv50_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin;
+ struct nouveau_gpuobj *grctx = NULL;
+ struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
+ int hdr, ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &grctx);
+ if (ret)
+ return ret;
+
+ hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
+ nv_wo32(ramin, hdr + 0x00, 0x00190002);
+ nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
+ nv_wo32(ramin, hdr + 0x08, grctx->vinst);
+ nv_wo32(ramin, hdr + 0x0c, 0);
+ nv_wo32(ramin, hdr + 0x10, 0);
+ nv_wo32(ramin, hdr + 0x14, 0x00010000);
+
+ nv50_grctx_fill(dev, grctx);
+ nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
+
+ dev_priv->engine.instmem.flush(dev);
+
+ atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
+ chan->engctx[NVOBJ_ENGINE_GR] = grctx;
+ return 0;
+}
+
+static void
+nv50_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nouveau_gpuobj *grctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
+
+ for (i = hdr; i < hdr + 24; i += 4)
+ nv_wo32(chan->ramin, i, 0);
+ dev_priv->engine.instmem.flush(dev);
+
+ atomic_dec(&chan->vm->engref[engine]);
+ nouveau_gpuobj_ref(NULL, &grctx);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nv50_graph_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 1;
+ obj->class = class;
+
+ nv_wo32(obj, 0x00, class);
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static void
+nv50_graph_tlb_flush(struct drm_device *dev, int engine)
+{
+ nv50_vm_flush_engine(dev, 0);
+}
+
+static void
+nv84_graph_tlb_flush(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+ bool idle, timeout = false;
+ unsigned long flags;
+ u64 start;
+ u32 tmp;
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
+
+ start = ptimer->read(dev);
+ do {
+ idle = true;
+
+ for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+
+ for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+
+ for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
+ if ((tmp & 7) == 1)
+ idle = false;
+ }
+ } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));
+
+ if (timeout) {
+ NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
+ "0x%08x 0x%08x 0x%08x 0x%08x\n",
+ nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
+ nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
+ }
+
+ nv50_vm_flush_engine(dev, 0);
+
+ nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+}
+
+static struct nouveau_enum nv50_mp_exec_error_names[] = {
+ { 3, "STACK_UNDERFLOW", NULL },
+ { 4, "QUADON_ACTIVE", NULL },
+ { 8, "TIMEOUT", NULL },
+ { 0x10, "INVALID_OPCODE", NULL },
+ { 0x40, "BREAKPOINT", NULL },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
+ { 0x00000001, "NOTIFY" },
+ { 0x00000002, "IN" },
+ { 0x00000004, "OUT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
+ { 0x00000001, "FAULT" },
+ {}
+};
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+struct nouveau_enum nv50_data_error_names[] = {
+ { 0x00000003, "INVALID_OPERATION", NULL },
+ { 0x00000004, "INVALID_VALUE", NULL },
+ { 0x00000005, "INVALID_ENUM", NULL },
+ { 0x00000008, "INVALID_OBJECT", NULL },
+ { 0x00000009, "READ_ONLY_OBJECT", NULL },
+ { 0x0000000a, "SUPERVISOR_OBJECT", NULL },
+ { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
+ { 0x0000000c, "INVALID_BITFIELD", NULL },
+ { 0x0000000d, "BEGIN_END_ACTIVE", NULL },
+ { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
+ { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
+ { 0x00000010, "RT_DOUBLE_BIND", NULL },
+ { 0x00000011, "RT_TYPES_MISMATCH", NULL },
+ { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
+ { 0x00000015, "FP_TOO_FEW_REGS", NULL },
+ { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
+ { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
+ { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
+ { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
+ { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
+ { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
+ { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
+ { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
+ { 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
+ { 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
+ { 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
+ { 0x00000024, "VP_ZERO_INPUTS", NULL },
+ { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
+ { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
+ { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
+ { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
+ { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
+ { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
+ { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
+ { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
+ { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
+ { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
+ { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
+ { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
+ { 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
+ { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
+ { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
+ {}
+};
+
+static struct nouveau_bitfield nv50_graph_intr[] = {
+ { 0x00000001, "NOTIFY" },
+ { 0x00000002, "COMPUTE_QUERY" },
+ { 0x00000010, "ILLEGAL_MTHD" },
+ { 0x00000020, "ILLEGAL_CLASS" },
+ { 0x00000040, "DOUBLE_NOTIFY" },
+ { 0x00001000, "CONTEXT_SWITCH" },
+ { 0x00010000, "BUFFER_NOTIFY" },
+ { 0x00100000, "DATA_ERROR" },
+ { 0x00200000, "TRAP" },
+ { 0x01000000, "SINGLE_STEP" },
+ {}
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ uint32_t addr, mp10, status, pc, oplow, ophigh;
+ int i;
+ int mps = 0;
+ for (i = 0; i < 4; i++) {
+ if (!(units & 1 << (i+24)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ addr = 0x408200 + (tpid << 12) + (i << 7);
+ else
+ addr = 0x408100 + (tpid << 11) + (i << 7);
+ mp10 = nv_rd32(dev, addr + 0x10);
+ status = nv_rd32(dev, addr + 0x14);
+ if (!status)
+ continue;
+ if (display) {
+ nv_rd32(dev, addr + 0x20);
+ pc = nv_rd32(dev, addr + 0x24);
+ oplow = nv_rd32(dev, addr + 0x70);
+ ophigh = nv_rd32(dev, addr + 0x74);
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+ "TP %d MP %d: ", tpid, i);
+ nouveau_enum_print(nv50_mp_exec_error_names, status);
+ printk(" at %06x warp %d, opcode %08x %08x\n",
+ pc&0xffffff, pc >> 24,
+ oplow, ophigh);
+ }
+ nv_wr32(dev, addr + 0x10, mp10);
+ nv_wr32(dev, addr + 0x14, 0);
+ mps++;
+ }
+ if (!mps && display)
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+ "No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+ uint32_t ustatus_new, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int tps = 0;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ int i, r;
+ uint32_t ustatus_addr, ustatus;
+ for (i = 0; i < 16; i++) {
+ if (!(units & (1 << i)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ ustatus_addr = ustatus_old + (i << 12);
+ else
+ ustatus_addr = ustatus_new + (i << 11);
+ ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+ if (!ustatus)
+ continue;
+ tps++;
+ switch (type) {
+ case 6: /* texture error... unknown for now */
+ if (display) {
+ NV_ERROR(dev, "magic set %d:\n", i);
+ for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ }
+ break;
+ case 7: /* MP error */
+ if (ustatus & 0x04030000) {
+ nv50_pgraph_mp_trap(dev, i, display);
+ ustatus &= ~0x04030000;
+ }
+ break;
+ case 8: /* TPDMA error */
+ {
+ uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+ uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+ uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+ uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+ uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+ uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+ uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+ /* 2d engine destination */
+ if (ustatus & 0x00000010) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000010;
+ }
+ /* Render target */
+ if (ustatus & 0x00000040) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000040;
+ }
+ /* CUDA memory: l[], g[] or stack. */
+ if (ustatus & 0x00000080) {
+ if (display) {
+ if (e18 & 0x80000000) {
+ /* g[] read fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 24) & 0x1f));
+ e18 &= ~0x1f000000;
+ } else if (e18 & 0xc) {
+ /* g[] write fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 7) & 0x1f));
+ e18 &= ~0x00000f80;
+ } else {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ i, e14, e10);
+ }
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000080;
+ }
+ }
+ break;
+ }
+ if (ustatus) {
+ if (display)
+ NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ }
+ nv_wr32(dev, ustatus_addr, 0xc0000000);
+ }
+
+ if (!tps && display)
+ NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
+
+static int
+nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
+{
+ u32 status = nv_rd32(dev, 0x400108);
+ u32 ustatus;
+
+ if (!status && display) {
+ NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
+ return 1;
+ }
+
+ /* DISPATCH: Relays commands to other units and handles NOTIFY,
+ * COND, QUERY. If you get a trap from it, the command is still stuck
+ * in DISPATCH and you need to do something about it. */
+ if (status & 0x001) {
+ ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+ }
+
+ nv_wr32(dev, 0x400500, 0x00000000);
+
+ /* Known to be triggered by screwed up NOTIFY and COND... */
+ if (ustatus & 0x00000001) {
+ u32 addr = nv_rd32(dev, 0x400808);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 datal = nv_rd32(dev, 0x40080c);
+ u32 datah = nv_rd32(dev, 0x400810);
+ u32 class = nv_rd32(dev, 0x400814);
+ u32 r848 = nv_rd32(dev, 0x400848);
+
+ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
+ if (display && (addr & 0x80000000)) {
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x%08x "
+ "400808 0x%08x 400848 0x%08x\n",
+ chid, inst, subc, class, mthd, datah,
+ datal, addr, r848);
+ } else
+ if (display) {
+ NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ }
+
+ nv_wr32(dev, 0x400808, 0);
+ nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+ nv_wr32(dev, 0x400848, 0);
+ ustatus &= ~0x00000001;
+ }
+
+ if (ustatus & 0x00000002) {
+ u32 addr = nv_rd32(dev, 0x40084c);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, 0x40085c);
+ u32 class = nv_rd32(dev, 0x400814);
+
+ NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
+ if (display && (addr & 0x80000000)) {
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x 40084c 0x%08x\n",
+ chid, inst, subc, class, mthd,
+ data, addr);
+ } else
+ if (display) {
+ NV_INFO(dev, "PGRAPH - no stuck command?\n");
+ }
+
+ nv_wr32(dev, 0x40084c, 0);
+ ustatus &= ~0x00000002;
+ }
+
+ if (ustatus && display) {
+ NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
+ "0x%08x)\n", ustatus);
+ }
+
+ nv_wr32(dev, 0x400804, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x001);
+ status &= ~0x001;
+ if (!status)
+ return 0;
+ }
+
+ /* M2MF: Memory to memory copy engine. */
+ if (status & 0x002) {
+ u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_M2MF");
+ nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
+ nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
+ }
+
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 2);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x406800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x002);
+ status &= ~0x002;
+ }
+
+ /* VFETCH: Fetches data from vertex buffers. */
+ if (status & 0x004) {
+ u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
+ nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
+ nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
+ }
+
+ nv_wr32(dev, 0x400c04, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x004);
+ status &= ~0x004;
+ }
+
+ /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+ if (status & 0x008) {
+ ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
+ nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
+ nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
+ }
+
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 0x80);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x401800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x008);
+ status &= ~0x008;
+ }
+
+ /* CCACHE: Handles code and c[] caches and fills them. */
+ if (status & 0x010) {
+ ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+ if (display) {
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
+ nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
+ " %08x %08x %08x\n",
+ nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
+ nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
+ nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
+ nv_rd32(dev, 0x40501c));
+ }
+
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x010);
+ status &= ~0x010;
+ }
+
+ /* Unknown, not seen yet... 0x402000 is the only trap status reg
+ * remaining, so try to handle it anyway. Perhaps related to that
+ * unknown DMA slot on tesla? */
+ if (status & 0x20) {
+ ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+		/* no status modification on purpose */
+ }
+
+ /* TEXTURE: CUDA texturing units */
+ if (status & 0x040) {
+ nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
+ "PGRAPH - TRAP_TEXTURE");
+ nv_wr32(dev, 0x400108, 0x040);
+ status &= ~0x040;
+ }
+
+ /* MP: CUDA execution engines. */
+ if (status & 0x080) {
+ nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
+ "PGRAPH - TRAP_MP");
+ nv_wr32(dev, 0x400108, 0x080);
+ status &= ~0x080;
+ }
+
+ /* TPDMA: Handles TP-initiated uncached memory accesses:
+ * l[], g[], stack, 2d surfaces, render targets. */
+ if (status & 0x100) {
+ nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
+ "PGRAPH - TRAP_TPDMA");
+ nv_wr32(dev, 0x400108, 0x100);
+ status &= ~0x100;
+ }
+
+ if (status) {
+ if (display)
+ NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
+ nv_wr32(dev, 0x400108, status);
+ }
+
+ return 1;
+}
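Each branch of the trap handler above follows the same acknowledge sequence: test a bit in the PGRAPH trap status register (0x400108), dump the unit's own ustatus register, write 0xc0000000 back to that ustatus register to reset the unit, then write the bit to 0x400108 and clear it from the local status copy. A minimal standalone sketch of that sequence is shown below for reference; the reg_read()/reg_write() helpers and the three-entry unit table are illustrative stand-ins, not driver API.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver's nv_rd32()/nv_wr32() accessors. */
static uint32_t reg_read(uint32_t reg)                { (void)reg; return 0; }
static void     reg_write(uint32_t reg, uint32_t val) { (void)reg; (void)val; }

struct trap_unit {
	uint32_t status_bit;  /* bit in the PGRAPH trap status (0x400108) */
	uint32_t ustatus_reg; /* per-unit trap status register */
	const char *name;
};

/* A hypothetical subset of the units handled by the code above. */
static const struct trap_unit units[] = {
	{ 0x001, 0x400804, "DISPATCH" },
	{ 0x002, 0x406800, "M2MF" },
	{ 0x004, 0x400c04, "VFETCH" },
};

static void ack_traps(void)
{
	uint32_t status = reg_read(0x400108);
	size_t i;

	for (i = 0; i < sizeof(units) / sizeof(units[0]); i++) {
		if (!(status & units[i].status_bit))
			continue;
		printf("%s: ustatus 0x%08x\n", units[i].name,
		       reg_read(units[i].ustatus_reg) & 0x7fffffff);
		reg_write(units[i].ustatus_reg, 0xc0000000); /* reset the unit */
		reg_write(0x400108, units[i].status_bit);    /* ack the trap bit */
		status &= ~units[i].status_bit;
	}
}

int main(void)
{
	ack_traps();
	return 0;
}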
+
+int
+nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < pfifo->channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nv50_graph_isr(struct drm_device *dev)
+{
+ u32 stat;
+
+ while ((stat = nv_rd32(dev, 0x400100))) {
+ u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
+ u32 chid = nv50_graph_isr_chid(dev, inst);
+ u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 mthd = (addr & 0x00001ffc);
+ u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
+ u32 class = nv_rd32(dev, 0x400814);
+ u32 show = stat;
+
+ if (stat & 0x00000010) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
+ mthd, data))
+ show &= ~0x00000010;
+ }
+
+ show = (show && nouveau_ratelimit()) ? show : 0;
+
+ if (show & 0x00100000) {
+ u32 ecode = nv_rd32(dev, 0x400110);
+ NV_INFO(dev, "PGRAPH - DATA_ERROR ");
+ nouveau_enum_print(nv50_data_error_names, ecode);
+ printk("\n");
+ }
+
+ if (stat & 0x00200000) {
+ if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
+ show &= ~0x00200000;
+ }
+
+ nv_wr32(dev, 0x400100, stat);
+ nv_wr32(dev, 0x400500, 0x00010001);
+
+ if (show) {
+ NV_INFO(dev, "PGRAPH -");
+ nouveau_bitfield_print(nv50_graph_intr, show);
+ printk("\n");
+ NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv50_fb_vm_trap(dev, 1);
+ }
+ }
+
+ if (nv_rd32(dev, 0x400824) & (1 << 31))
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+}
+
+static void
+nv50_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, GR);
+
+ nouveau_irq_unregister(dev, 12);
+ kfree(pgraph);
+}
+
+int
+nv50_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_graph_engine *pgraph;
+ int ret;
+
+	pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
+ if (!pgraph)
+ return -ENOMEM;
+
+ ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
+ &pgraph->ctxprog_size,
+ &pgraph->grctx_size);
+ if (ret) {
+ NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
+ kfree(pgraph);
+ return 0;
+ }
+
+ pgraph->base.destroy = nv50_graph_destroy;
+ pgraph->base.init = nv50_graph_init;
+ pgraph->base.fini = nv50_graph_fini;
+ pgraph->base.context_new = nv50_graph_context_new;
+ pgraph->base.context_del = nv50_graph_context_del;
+ pgraph->base.object_new = nv50_graph_object_new;
+ if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
+ pgraph->base.tlb_flush = nv50_graph_tlb_flush;
+ else
+ pgraph->base.tlb_flush = nv84_graph_tlb_flush;
+
+ nouveau_irq_register(dev, 12, nv50_graph_isr);
+
+ NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
+ NVOBJ_CLASS(dev, 0x0030, GR); /* null */
+ NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
+ NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
+
+ /* tesla */
+ if (dev_priv->chipset == 0x50)
+ NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
+ else
+ if (dev_priv->chipset < 0xa0)
+ NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
+ else {
+ switch (dev_priv->chipset) {
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ NVOBJ_CLASS(dev, 0x8397, GR);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ NVOBJ_CLASS(dev, 0x8597, GR);
+ break;
+ case 0xaf:
+ NVOBJ_CLASS(dev, 0x8697, GR);
+ break;
+ }
+ }
+
+ /* compute */
+ NVOBJ_CLASS(dev, 0x50c0, GR);
+ if (dev_priv->chipset > 0xa0 &&
+ dev_priv->chipset != 0xaa &&
+ dev_priv->chipset != 0xac)
+ NVOBJ_CLASS(dev, 0x85c0, GR);
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include <core/mm.h>
+#include <engine/fifo.h>
+
+#include "nvc0.h"
+#include "fuc/hubnvc0.fuc.h"
+#include "fuc/gpcnvc0.fuc.h"
+
+static void
+nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
+{
+ NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
+ nv_rd32(dev, base + 0x400));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
+ nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
+ nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
+}
+
+static void
+nvc0_graph_ctxctl_debug(struct drm_device *dev)
+{
+ u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
+ u32 gpc;
+
+ nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
+ for (gpc = 0; gpc < gpcnr; gpc++)
+ nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
+}
+
+static int
+nvc0_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, 0x409840, 0x00000030);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000003);
+ if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+ NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+ return 0;
+}
+
+static int
+nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+ nv_wr32(dev, 0x409840, 0x00000003);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+ nv_wr32(dev, 0x409504, 0x00000009);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvc0_graph_construct_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ int ret, i;
+ u32 *ctx;
+
+ ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (!nouveau_ctxfw) {
+ nv_wr32(dev, 0x409840, 0x80000000);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000001);
+ if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
+ NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
+ nvc0_graph_ctxctl_debug(dev);
+ ret = -EBUSY;
+ goto err;
+ }
+ } else {
+ nvc0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
+ }
+
+ ret = nvc0_grctx_generate(chan);
+ if (ret)
+ goto err;
+
+ if (!nouveau_ctxfw) {
+ nv_wr32(dev, 0x409840, 0x80000000);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
+ NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
+ nvc0_graph_ctxctl_debug(dev);
+ ret = -EBUSY;
+ goto err;
+ }
+ } else {
+ ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret)
+ goto err;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+ priv->grctx_vals = ctx;
+ return 0;
+
+err:
+ kfree(ctx);
+ return ret;
+}
+
+static int
+nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+ struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i = 0, gpc, tp, ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
+ &grch->unk408004);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
+ &grch->unk40800c);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
+ &grch->unk418810);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
+ &grch->mmio);
+ if (ret)
+ return ret;
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00408004);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00408008);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00408010);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00418810);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
+ nv_wo32(grch->mmio, i++ * 4, 0x00419848);
+ nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00419004);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x00419008);
+ nv_wo32(grch->mmio, i++ * 4, 0x00000000);
+
+ nv_wo32(grch->mmio, i++ * 4, 0x00418808);
+ nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
+ nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
+ nv_wo32(grch->mmio, i++ * 4, 0x80000018);
+
+ if (dev_priv->chipset != 0xc1) {
+ u32 magic = 0x02180000;
+ nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ u32 reg = TP_UNIT(gpc, tp, 0x520);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ magic += 0x0324;
+ }
+ }
+ } else {
+ u32 magic = 0x02180000;
+ nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+ nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
+ nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
+ nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ u32 reg = TP_UNIT(gpc, tp, 0x520);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
+ magic += 0x0324;
+ }
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ u32 reg = TP_UNIT(gpc, tp, 0x544);
+ nv_wo32(grch->mmio, i++ * 4, reg);
+ nv_wo32(grch->mmio, i++ * 4, magic);
+ magic += 0x0324;
+ }
+ }
+ }
+
+ grch->mmio_nr = i / 2;
+ return 0;
+}
+
+static int
+nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nvc0_graph_priv *priv = nv_engine(dev, engine);
+ struct nvc0_graph_chan *grch;
+ struct nouveau_gpuobj *grctx;
+ int ret, i;
+
+ grch = kzalloc(sizeof(*grch), GFP_KERNEL);
+ if (!grch)
+ return -ENOMEM;
+ chan->engctx[NVOBJ_ENGINE_GR] = grch;
+
+ ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+ &grch->grctx);
+ if (ret)
+ goto error;
+ grctx = grch->grctx;
+
+ ret = nvc0_graph_create_context_mmio_list(chan);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
+ pinstmem->flush(dev);
+
+ if (!priv->grctx_vals) {
+ ret = nvc0_graph_construct_context(chan);
+ if (ret)
+ goto error;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+
+ if (!nouveau_ctxfw) {
+ nv_wo32(grctx, 0x00, grch->mmio_nr);
+ nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
+ } else {
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+ }
+ pinstmem->flush(dev);
+ return 0;
+
+error:
+ priv->base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nvc0_graph_chan *grch = chan->engctx[engine];
+
+ nouveau_gpuobj_ref(NULL, &grch->mmio);
+ nouveau_gpuobj_ref(NULL, &grch->unk418810);
+ nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+ nouveau_gpuobj_ref(NULL, &grch->unk408004);
+ nouveau_gpuobj_ref(NULL, &grch->grctx);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ return 0;
+}
+
+static int
+nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nvc0_graph_init_obj418880(struct drm_device *dev)
+{
+ struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int i;
+
+ nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+ nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nvc0_graph_init_regs(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400080, 0x003083c2);
+ nv_wr32(dev, 0x400088, 0x00006fe7);
+ nv_wr32(dev, 0x40008c, 0x00000000);
+ nv_wr32(dev, 0x400090, 0x00000030);
+ nv_wr32(dev, 0x40013c, 0x013901f7);
+ nv_wr32(dev, 0x400140, 0x00000100);
+ nv_wr32(dev, 0x400144, 0x00000000);
+ nv_wr32(dev, 0x400148, 0x00000110);
+ nv_wr32(dev, 0x400138, 0x00000000);
+ nv_wr32(dev, 0x400130, 0x00000000);
+ nv_wr32(dev, 0x400134, 0x00000000);
+ nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nvc0_graph_init_gpc_0(struct drm_device *dev)
+{
+ struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
+ u32 data[TP_MAX / 8];
+ u8 tpnr[GPC_MAX];
+ int i, gpc, tpc;
+
+ nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
+ /*
+ * TP ROP UNKVAL(magic_not_rop_nr)
+ * 450: 4/0/0/0 2 3
+ * 460: 3/4/0/0 4 1
+ * 465: 3/4/4/0 4 7
+ * 470: 3/3/4/4 5 5
+ * 480: 3/4/4/4 6 6
+ */
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
+ for (i = 0, gpc = -1; i < priv->tp_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpnr[gpc]);
+ tpc = priv->tp_nr[gpc] - tpnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ priv->tp_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
+}
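The distribution loop in nvc0_graph_init_gpc_0() above hands out TPs to GPCs round-robin and packs each TP's per-GPC index into successive 4-bit fields of the four broadcast registers 0x0980-0x098c. The self-contained sketch below restates just that packing step; the 3/4/4/0 layout used in main() is taken from the "465" row of the comment table and is only an example input.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GPC_MAX 4
#define TP_MAX  32

/*
 * Round-robin TP distribution, as in nvc0_graph_init_gpc_0(): walk the
 * GPCs in order, skip GPCs with no TPs left, and store the per-GPC TP
 * index in the next 4-bit field of data[].
 */
static void pack_tp_map(const uint8_t tp_nr[GPC_MAX], int gpc_nr,
			int tp_total, uint32_t data[TP_MAX / 8])
{
	uint8_t left[GPC_MAX];
	int i, gpc, tpc;

	memset(data, 0x00, (TP_MAX / 8) * sizeof(*data));
	memcpy(left, tp_nr, GPC_MAX);

	for (i = 0, gpc = -1; i < tp_total; i++) {
		do {
			gpc = (gpc + 1) % gpc_nr;
		} while (!left[gpc]);
		tpc = tp_nr[gpc] - left[gpc]--;

		data[i / 8] |= tpc << ((i % 8) * 4);
	}
}

int main(void)
{
	/* Example: the 3/4/4/0 ("465") layout, 11 TPs in total. */
	const uint8_t tp_nr[GPC_MAX] = { 3, 4, 4, 0 };
	uint32_t data[TP_MAX / 8];
	int i;

	pack_tp_map(tp_nr, GPC_MAX, 11, data);
	for (i = 0; i < TP_MAX / 8; i++)
		printf("data[%d] = 0x%08x\n", i, data[i]);
	return 0;
}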
+
+static void
+nvc0_graph_init_units(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x409c24, 0x000f0000);
+ nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
+ nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
+ nv_wr32(dev, 0x408030, 0xc0000000);
+ nv_wr32(dev, 0x40601c, 0xc0000000);
+ nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
+ nv_wr32(dev, 0x406018, 0xc0000000);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x405844, 0x00ffffff);
+ nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nvc0_graph_init_gpc_1(struct drm_device *dev)
+{
+ struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int gpc, tp;
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
+ nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
+ }
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+}
+
+static void
+nvc0_graph_init_rop(struct drm_device *dev)
+{
+ struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int rop;
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+}
+
+static void
+nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
+ struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
+{
+ int i;
+
+ nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+ for (i = 0; i < data->size / 4; i++)
+ nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
+
+ nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+ for (i = 0; i < code->size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+ nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
+ }
+}
+
+static int
+nvc0_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ u32 r000260;
+ int i;
+
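+	/* Two paths: without the nouveau_ctxfw option, upload the built-in
+	 * HUB/GPC ucode below; with it, use the external fuc images loaded
+	 * by nvc0_graph_create(). */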
+ if (!nouveau_ctxfw) {
+ /* load HUB microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x4091c0, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
+ nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
+
+ nv_wr32(dev, 0x409180, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, 0x409188, i >> 6);
+ nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
+ }
+
+ /* load GPC microcode */
+ nv_wr32(dev, 0x41a1c0, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
+ nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
+
+ nv_wr32(dev, 0x41a180, 0x01000000);
+ for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, 0x41a188, i >> 6);
+ nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
+ }
+ nv_wr32(dev, 0x000260, r000260);
+
+ /* start HUB ucode running, it'll init the GPCs */
+ nv_wr32(dev, 0x409800, dev_priv->chipset);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
+ NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
+ nvc0_graph_ctxctl_debug(dev);
+ return -EBUSY;
+ }
+
+ priv->grctx_size = nv_rd32(dev, 0x409804);
+ return 0;
+ }
+
+ /* load fuc microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
+ nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
+ nv_wr32(dev, 0x000260, r000260);
+
+ /* start both of them running */
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x41a10c, 0x00000000);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x41a100, 0x00000002);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+ NV_INFO(dev, "0x409800 wait failed\n");
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x7fffffff);
+ nv_wr32(dev, 0x409504, 0x00000021);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000010);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ return -EBUSY;
+ }
+ priv->grctx_size = nv_rd32(dev, 0x409800);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000016);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000025);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nvc0_graph_init(struct drm_device *dev, int engine)
+{
+ int ret;
+
+ nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+ nvc0_graph_init_obj418880(dev);
+ nvc0_graph_init_regs(dev);
+ /*nvc0_graph_init_unitplemented_magics(dev);*/
+ nvc0_graph_init_gpc_0(dev);
+ /*nvc0_graph_init_unitplemented_c242(dev);*/
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
+
+ nvc0_graph_init_units(dev);
+ nvc0_graph_init_gpc_1(dev);
+ nvc0_graph_init_rop(dev);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400118, 0xffffffff);
+ nv_wr32(dev, 0x400130, 0xffffffff);
+ nv_wr32(dev, 0x40011c, 0xffffffff);
+ nv_wr32(dev, 0x400134, 0xffffffff);
+ nv_wr32(dev, 0x400054, 0x34ce3464);
+
+ ret = nvc0_graph_init_ctxctl(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < pfifo->channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nvc0_graph_ctxctl_isr(struct drm_device *dev)
+{
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ if (ustat & 0x00000001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
+ if (ustat & 0x00080000)
+ NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
+ if (ustat & ~0x00080001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
+
+ nvc0_graph_ctxctl_debug(dev);
+ nv_wr32(dev, 0x409c20, ustat);
+}
+
+static void
+nvc0_graph_isr(struct drm_device *dev)
+{
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ u32 chid = nvc0_graph_isr_chid(dev, inst);
+ u32 stat = nv_rd32(dev, 0x400100);
+ u32 addr = nv_rd32(dev, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(dev, 0x400708);
+ u32 code = nv_rd32(dev, 0x400110);
+ u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ nv_wr32(dev, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ u32 trap = nv_rd32(dev, 0x400108);
+ NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
+ nv_wr32(dev, 0x400108, trap);
+ nv_wr32(dev, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ nvc0_graph_ctxctl_isr(dev);
+ nv_wr32(dev, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+ nv_wr32(dev, 0x400100, stat);
+ }
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
+
+static int
+nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
+ struct nvc0_graph_fuc *fuc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const struct firmware *fw;
+ char f[32];
+ int ret;
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
+ ret = request_firmware(&fw, f, &dev->pdev->dev);
+ if (ret) {
+ snprintf(f, sizeof(f), "nouveau/%s", fwname);
+ ret = request_firmware(&fw, f, &dev->pdev->dev);
+ if (ret) {
+ NV_ERROR(dev, "failed to load %s\n", fwname);
+ return ret;
+ }
+ }
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+static void
+nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
+{
+ if (fuc->data) {
+ kfree(fuc->data);
+ fuc->data = NULL;
+ }
+}
+
+static void
+nvc0_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nvc0_graph_priv *priv = nv_engine(dev, engine);
+
+ if (nouveau_ctxfw) {
+ nvc0_graph_destroy_fw(&priv->fuc409c);
+ nvc0_graph_destroy_fw(&priv->fuc409d);
+ nvc0_graph_destroy_fw(&priv->fuc41ac);
+ nvc0_graph_destroy_fw(&priv->fuc41ad);
+ }
+
+ nouveau_irq_unregister(dev, 12);
+
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+ if (priv->grctx_vals)
+ kfree(priv->grctx_vals);
+
+ NVOBJ_ENGINE_DEL(dev, GR);
+ kfree(priv);
+}
+
+int
+nvc0_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_graph_priv *priv;
+ int ret, gpc, i;
+ u32 fermi;
+
+ fermi = nvc0_graph_class(dev);
+ if (!fermi) {
+ NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.destroy = nvc0_graph_destroy;
+ priv->base.init = nvc0_graph_init;
+ priv->base.fini = nvc0_graph_fini;
+ priv->base.context_new = nvc0_graph_context_new;
+ priv->base.context_del = nvc0_graph_context_del;
+ priv->base.object_new = nvc0_graph_object_new;
+
+ NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
+ nouveau_irq_register(dev, 12, nvc0_graph_isr);
+
+ if (nouveau_ctxfw) {
+ NV_INFO(dev, "PGRAPH: using external firmware\n");
+ if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
+ nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
+ nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
+ nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
+ ret = 0;
+ goto error;
+ }
+ }
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
+ priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+ priv->tp_total += priv->tp_nr[gpc];
+ }
+
+ /*XXX: these need figuring out... */
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
+ priv->magic_not_rop_nr = 0x07;
+ } else
+ if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
+ priv->magic_not_rop_nr = 0x05;
+ } else
+ if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
+ priv->magic_not_rop_nr = 0x06;
+ }
+ break;
+ case 0xc3: /* 450, 4/0/0/0, 2 */
+ priv->magic_not_rop_nr = 0x03;
+ break;
+ case 0xc4: /* 460, 3/4/0/0, 4 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
+ case 0xc1: /* 2/0/0/0, 1 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
+ case 0xc8: /* 4/4/3/4, 5 */
+ priv->magic_not_rop_nr = 0x06;
+ break;
+ case 0xce: /* 4/4/0/0, 4 */
+ priv->magic_not_rop_nr = 0x03;
+ break;
+ case 0xcf: /* 4/0/0/0, 3 */
+ priv->magic_not_rop_nr = 0x03;
+ break;
+ case 0xd9: /* 1/0/0/0, 1 */
+ priv->magic_not_rop_nr = 0x01;
+ break;
+ }
+
+ if (!priv->magic_not_rop_nr) {
+ NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+ priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
+ priv->tp_nr[3], priv->rop_nr);
+ priv->magic_not_rop_nr = 0x00;
+ }
+
+ NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
+ NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
+ NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
+ if (fermi >= 0x9197)
+ NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
+ if (fermi >= 0x9297)
+ NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
+ NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
+ return 0;
+
+error:
+ nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
+ return ret;
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVC0_GRAPH_H__
+#define __NVC0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TP_MAX 32
+
+#define ROP_BCAST(r) (0x408800 + (r))
+#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r) (0x418000 + (r))
+#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
+#define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
+
+struct nvc0_graph_fuc {
+ u32 *data;
+ u32 size;
+};
+
+struct nvc0_graph_priv {
+ struct nouveau_exec_engine base;
+
+ struct nvc0_graph_fuc fuc409c;
+ struct nvc0_graph_fuc fuc409d;
+ struct nvc0_graph_fuc fuc41ac;
+ struct nvc0_graph_fuc fuc41ad;
+
+ u8 gpc_nr;
+ u8 rop_nr;
+ u8 tp_nr[GPC_MAX];
+ u8 tp_total;
+
+ u32 grctx_size;
+ u32 *grctx_vals;
+ struct nouveau_gpuobj *unk4188b4;
+ struct nouveau_gpuobj *unk4188b8;
+
+ u8 magic_not_rop_nr;
+};
+
+struct nvc0_graph_chan {
+ struct nouveau_gpuobj *grctx;
+ struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+ struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+ struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+ struct nouveau_gpuobj *mmio;
+ int mmio_nr;
+};
+
+int nvc0_grctx_generate(struct nouveau_channel *);
+
+/* nvc0_graph.c uses this also to determine supported chipsets */
+static inline u32
+nvc0_graph_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0xc0:
+ case 0xc3:
+ case 0xc4:
+ case 0xce: /* guess, mmio trace shows only 0x9097 state */
+ case 0xcf: /* guess, mmio trace shows only 0x9097 state */
+ return 0x9097;
+ case 0xc1:
+ return 0x9197;
+ case 0xc8:
+ case 0xd9:
+ return 0x9297;
+ default:
+ return 0;
+ }
+}
+
+#endif
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/firmware.h>
+#include <linux/module.h>
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include <core/mm.h>
+#include <engine/fifo.h>
+
+#include "nve0.h"
+
+static void
+nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
+{
+ NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
+ nv_rd32(dev, base + 0x400));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
+ nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
+ NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
+ nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
+ nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
+}
+
+static void
+nve0_graph_ctxctl_debug(struct drm_device *dev)
+{
+ u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
+ u32 gpc;
+
+ nve0_graph_ctxctl_debug_unit(dev, 0x409000);
+ for (gpc = 0; gpc < gpcnr; gpc++)
+ nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
+}
+
+static int
+nve0_graph_load_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+
+ nv_wr32(dev, 0x409840, 0x00000030);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
+ nv_wr32(dev, 0x409504, 0x00000003);
+ if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
+ NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
+
+ return 0;
+}
+
+static int
+nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
+{
+ nv_wr32(dev, 0x409840, 0x00000003);
+ nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
+ nv_wr32(dev, 0x409504, 0x00000009);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nve0_graph_construct_context(struct nouveau_channel *chan)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ int ret, i;
+ u32 *ctx;
+
+ ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ nve0_graph_load_context(chan);
+
+ nv_wo32(grch->grctx, 0x1c, 1);
+ nv_wo32(grch->grctx, 0x20, 0);
+ nv_wo32(grch->grctx, 0x28, 0);
+ nv_wo32(grch->grctx, 0x2c, 0);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nve0_grctx_generate(chan);
+ if (ret)
+ goto err;
+
+ ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ ctx[i / 4] = nv_ro32(grch->grctx, i);
+
+ priv->grctx_vals = ctx;
+ return 0;
+
+err:
+ kfree(ctx);
+ return ret;
+}
+
+static int
+nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
+{
+ struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
+ struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
+ struct drm_device *dev = chan->dev;
+ u32 magic[GPC_MAX][2];
+ u16 offset = 0x0000;
+ int gpc;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
+ &grch->unk408004);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
+ &grch->unk40800c);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
+ &grch->unk418810);
+ if (ret)
+ return ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
+ &grch->mmio);
+ if (ret)
+ return ret;
+
+#define mmio(r,v) do { \
+ nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r)); \
+ nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v)); \
+ grch->mmio_nr++; \
+} while (0)
+ mmio(0x40800c, grch->unk40800c->linst >> 8);
+ mmio(0x408010, 0x80000000);
+ mmio(0x419004, grch->unk40800c->linst >> 8);
+ mmio(0x419008, 0x00000000);
+ mmio(0x4064cc, 0x80000000);
+ mmio(0x408004, grch->unk408004->linst >> 8);
+ mmio(0x408008, 0x80000030);
+ mmio(0x418808, grch->unk408004->linst >> 8);
+ mmio(0x41880c, 0x80000030);
+ mmio(0x4064c8, 0x01800600);
+ mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
+ mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
+ mmio(0x405830, 0x02180648);
+ mmio(0x4064c4, 0x0192ffff);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
+ u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
+ magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
+ magic[gpc][1] = 0x00000000 | (magic1 << 16);
+ offset += 0x0324 * priv->tpc_nr[gpc];
+ }
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
+ mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
+ offset += 0x07ff * priv->tpc_nr[gpc];
+ }
+
+ mmio(0x17e91c, 0x06060609);
+ mmio(0x17e920, 0x00090a05);
+#undef mmio
+ return 0;
+}
+
+static int
+nve0_graph_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct nve0_graph_priv *priv = nv_engine(dev, engine);
+ struct nve0_graph_chan *grch;
+ struct nouveau_gpuobj *grctx;
+ int ret, i;
+
+ grch = kzalloc(sizeof(*grch), GFP_KERNEL);
+ if (!grch)
+ return -ENOMEM;
+ chan->engctx[NVOBJ_ENGINE_GR] = grch;
+
+ ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
+ NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
+ &grch->grctx);
+ if (ret)
+ goto error;
+ grctx = grch->grctx;
+
+ ret = nve0_graph_create_context_mmio_list(chan);
+ if (ret)
+ goto error;
+
+ nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
+ nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
+ pinstmem->flush(dev);
+
+ if (!priv->grctx_vals) {
+ ret = nve0_graph_construct_context(chan);
+ if (ret)
+ goto error;
+ }
+
+ for (i = 0; i < priv->grctx_size; i += 4)
+ nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
+ nv_wo32(grctx, 0xf4, 0);
+ nv_wo32(grctx, 0xf8, 0);
+ nv_wo32(grctx, 0x10, grch->mmio_nr);
+ nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
+ nv_wo32(grctx, 0x1c, 1);
+ nv_wo32(grctx, 0x20, 0);
+ nv_wo32(grctx, 0x28, 0);
+ nv_wo32(grctx, 0x2c, 0);
+
+ pinstmem->flush(dev);
+ return 0;
+
+error:
+ priv->base.context_del(chan, engine);
+ return ret;
+}
+
+static void
+nve0_graph_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nve0_graph_chan *grch = chan->engctx[engine];
+
+ nouveau_gpuobj_ref(NULL, &grch->mmio);
+ nouveau_gpuobj_ref(NULL, &grch->unk418810);
+ nouveau_gpuobj_ref(NULL, &grch->unk40800c);
+ nouveau_gpuobj_ref(NULL, &grch->unk408004);
+ nouveau_gpuobj_ref(NULL, &grch->grctx);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nve0_graph_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ return 0;
+}
+
+static int
+nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ return 0;
+}
+
+static void
+nve0_graph_init_obj418880(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int i;
+
+ nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
+ for (i = 0; i < 4; i++)
+ nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
+ nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
+ nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
+}
+
+static void
+nve0_graph_init_regs(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x400080, 0x003083c2);
+ nv_wr32(dev, 0x400088, 0x0001ffe7);
+ nv_wr32(dev, 0x40008c, 0x00000000);
+ nv_wr32(dev, 0x400090, 0x00000030);
+ nv_wr32(dev, 0x40013c, 0x003901f7);
+ nv_wr32(dev, 0x400140, 0x00000100);
+ nv_wr32(dev, 0x400144, 0x00000000);
+ nv_wr32(dev, 0x400148, 0x00000110);
+ nv_wr32(dev, 0x400138, 0x00000000);
+ nv_wr32(dev, 0x400130, 0x00000000);
+ nv_wr32(dev, 0x400134, 0x00000000);
+ nv_wr32(dev, 0x400124, 0x00000002);
+}
+
+static void
+nve0_graph_init_units(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x409ffc, 0x00000000);
+ nv_wr32(dev, 0x409c14, 0x00003e3e);
+ nv_wr32(dev, 0x409c24, 0x000f0000);
+
+ nv_wr32(dev, 0x404000, 0xc0000000);
+ nv_wr32(dev, 0x404600, 0xc0000000);
+ nv_wr32(dev, 0x408030, 0xc0000000);
+ nv_wr32(dev, 0x404490, 0xc0000000);
+ nv_wr32(dev, 0x406018, 0xc0000000);
+ nv_wr32(dev, 0x407020, 0xc0000000);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x405844, 0x00ffffff);
+
+ nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
+ nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
+}
+
+static void
+nve0_graph_init_gpc_0(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
+ u32 data[TPC_MAX / 8];
+ u8 tpcnr[GPC_MAX];
+ int i, gpc, tpc;
+
+ nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
+
+ memset(data, 0x00, sizeof(data));
+ memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
+ for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
+ do {
+ gpc = (gpc + 1) % priv->gpc_nr;
+ } while (!tpcnr[gpc]);
+ tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
+
+ data[i / 8] |= tpc << ((i % 8) * 4);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
+ nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
+ nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
+ nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
+ priv->tpc_nr[gpc]);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
+ }
+
+ nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
+ nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
+}
+
+static void
+nve0_graph_init_gpc_1(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int gpc, tpc;
+
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
+ for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
+ nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
+ }
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
+ nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
+ }
+}
+
+static void
+nve0_graph_init_rop(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ int rop;
+
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
+ nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
+ }
+}
+
+static void
+nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
+ struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
+{
+ int i;
+
+ nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
+ for (i = 0; i < data->size / 4; i++)
+ nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
+
+ nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
+ for (i = 0; i < code->size / 4; i++) {
+ if ((i & 0x3f) == 0)
+ nv_wr32(dev, fuc_base + 0x0188, i >> 6);
+ nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
+ }
+}
+
+static int
+nve0_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ u32 r000260;
+
+ /* load fuc microcode */
+ r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
+ nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
+ nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
+ nv_wr32(dev, 0x000260, r000260);
+
+ /* start both of them running */
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x41a10c, 0x00000000);
+ nv_wr32(dev, 0x40910c, 0x00000000);
+ nv_wr32(dev, 0x41a100, 0x00000002);
+ nv_wr32(dev, 0x409100, 0x00000002);
+ if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
+ NV_INFO(dev, "0x409800 wait failed\n");
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x7fffffff);
+ nv_wr32(dev, 0x409504, 0x00000021);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000010);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
+ return -EBUSY;
+ }
+ priv->grctx_size = nv_rd32(dev, 0x409800);
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000016);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409840, 0xffffffff);
+ nv_wr32(dev, 0x409500, 0x00000000);
+ nv_wr32(dev, 0x409504, 0x00000025);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000030);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409810, 0xb00095c8);
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000031);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409810, 0x00080420);
+ nv_wr32(dev, 0x409800, 0x00000000);
+ nv_wr32(dev, 0x409500, 0x00000001);
+ nv_wr32(dev, 0x409504, 0x00000032);
+ if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
+ NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
+ return -EBUSY;
+ }
+
+ nv_wr32(dev, 0x409614, 0x00000070);
+ nv_wr32(dev, 0x409614, 0x00000770);
+ nv_wr32(dev, 0x40802c, 0x00000001);
+ return 0;
+}
+
+static int
+nve0_graph_init(struct drm_device *dev, int engine)
+{
+ int ret;
+
+ nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
+
+ nve0_graph_init_obj418880(dev);
+ nve0_graph_init_regs(dev);
+ nve0_graph_init_gpc_0(dev);
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+ nv_wr32(dev, 0x400100, 0xffffffff);
+ nv_wr32(dev, 0x40013c, 0xffffffff);
+
+ nve0_graph_init_units(dev);
+ nve0_graph_init_gpc_1(dev);
+ nve0_graph_init_rop(dev);
+
+ nv_wr32(dev, 0x400108, 0xffffffff);
+ nv_wr32(dev, 0x400138, 0xffffffff);
+ nv_wr32(dev, 0x400118, 0xffffffff);
+ nv_wr32(dev, 0x400130, 0xffffffff);
+ nv_wr32(dev, 0x40011c, 0xffffffff);
+ nv_wr32(dev, 0x400134, 0xffffffff);
+ nv_wr32(dev, 0x400054, 0x34ce3464);
+
+ ret = nve0_graph_init_ctxctl(dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int
+nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < pfifo->channels; i++) {
+ chan = dev_priv->channels.ptr[i];
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (inst == chan->ramin->vinst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nve0_graph_ctxctl_isr(struct drm_device *dev)
+{
+ u32 ustat = nv_rd32(dev, 0x409c18);
+
+ if (ustat & 0x00000001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
+ if (ustat & 0x00080000)
+ NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
+ if (ustat & ~0x00080001)
+ NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
+
+ nve0_graph_ctxctl_debug(dev);
+ nv_wr32(dev, 0x409c20, ustat);
+}
+
+static void
+nve0_graph_trap_isr(struct drm_device *dev, int chid)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
+ u32 trap = nv_rd32(dev, 0x400108);
+ int rop;
+
+ if (trap & 0x00000001) {
+ u32 stat = nv_rd32(dev, 0x404000);
+ NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
+ nv_wr32(dev, 0x404000, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x00000001);
+ trap &= ~0x00000001;
+ }
+
+ if (trap & 0x00000010) {
+ u32 stat = nv_rd32(dev, 0x405840);
+ NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
+ nv_wr32(dev, 0x405840, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x00000010);
+ trap &= ~0x00000010;
+ }
+
+ if (trap & 0x02000000) {
+ for (rop = 0; rop < priv->rop_nr; rop++) {
+ u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
+ u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
+ NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
+ rop, chid, statz, statc);
+ nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
+ nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
+ }
+ nv_wr32(dev, 0x400108, 0x02000000);
+ trap &= ~0x02000000;
+ }
+
+ if (trap) {
+ NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
+ nv_wr32(dev, 0x400108, trap);
+ }
+}
+
+static void
+nve0_graph_isr(struct drm_device *dev)
+{
+ u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
+ u32 chid = nve0_graph_isr_chid(dev, inst);
+ u32 stat = nv_rd32(dev, 0x400100);
+ u32 addr = nv_rd32(dev, 0x400704);
+ u32 mthd = (addr & 0x00003ffc);
+ u32 subc = (addr & 0x00070000) >> 16;
+ u32 data = nv_rd32(dev, 0x400708);
+ u32 code = nv_rd32(dev, 0x400110);
+ u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
+
+ if (stat & 0x00000010) {
+ if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
+ "subc %d class 0x%04x mthd 0x%04x "
+ "data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ }
+ nv_wr32(dev, 0x400100, 0x00000010);
+ stat &= ~0x00000010;
+ }
+
+ if (stat & 0x00000020) {
+ NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
+ "class 0x%04x mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00000020);
+ stat &= ~0x00000020;
+ }
+
+ if (stat & 0x00100000) {
+ NV_INFO(dev, "PGRAPH: DATA_ERROR [");
+ nouveau_enum_print(nv50_data_error_names, code);
+ printk("] ch %d [0x%010llx] subc %d class 0x%04x "
+ "mthd 0x%04x data 0x%08x\n",
+ chid, inst, subc, class, mthd, data);
+ nv_wr32(dev, 0x400100, 0x00100000);
+ stat &= ~0x00100000;
+ }
+
+ if (stat & 0x00200000) {
+ nve0_graph_trap_isr(dev, chid);
+ nv_wr32(dev, 0x400100, 0x00200000);
+ stat &= ~0x00200000;
+ }
+
+ if (stat & 0x00080000) {
+ nve0_graph_ctxctl_isr(dev);
+ nv_wr32(dev, 0x400100, 0x00080000);
+ stat &= ~0x00080000;
+ }
+
+ if (stat) {
+ NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
+ nv_wr32(dev, 0x400100, stat);
+ }
+
+ nv_wr32(dev, 0x400500, 0x00010001);
+}
+
+static int
+nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
+ struct nve0_graph_fuc *fuc)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const struct firmware *fw;
+ char f[32];
+ int ret;
+
+ snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
+ ret = request_firmware(&fw, f, &dev->pdev->dev);
+ if (ret)
+ return ret;
+
+ fuc->size = fw->size;
+ fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
+ release_firmware(fw);
+ return (fuc->data != NULL) ? 0 : -ENOMEM;
+}
+
+static void
+nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
+{
+ kfree(fuc->data);
+ fuc->data = NULL;
+}
+
+static void
+nve0_graph_destroy(struct drm_device *dev, int engine)
+{
+ struct nve0_graph_priv *priv = nv_engine(dev, engine);
+
+ nve0_graph_destroy_fw(&priv->fuc409c);
+ nve0_graph_destroy_fw(&priv->fuc409d);
+ nve0_graph_destroy_fw(&priv->fuc41ac);
+ nve0_graph_destroy_fw(&priv->fuc41ad);
+
+ nouveau_irq_unregister(dev, 12);
+
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
+ nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
+
+ kfree(priv->grctx_vals);
+
+ NVOBJ_ENGINE_DEL(dev, GR);
+ kfree(priv);
+}
+
+int
+nve0_graph_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nve0_graph_priv *priv;
+ int ret, gpc, i;
+ u32 kepler;
+
+ kepler = nve0_graph_class(dev);
+ if (!kepler) {
+ NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->base.destroy = nve0_graph_destroy;
+ priv->base.init = nve0_graph_init;
+ priv->base.fini = nve0_graph_fini;
+ priv->base.context_new = nve0_graph_context_new;
+ priv->base.context_del = nve0_graph_context_del;
+ priv->base.object_new = nve0_graph_object_new;
+
+ NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
+ nouveau_irq_register(dev, 12, nve0_graph_isr);
+
+ NV_INFO(dev, "PGRAPH: using external firmware\n");
+ if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
+ nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
+ nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
+ nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
+ ret = 0;
+ goto error;
+ }
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
+ if (ret)
+ goto error;
+
+ for (i = 0; i < 0x1000; i += 4) {
+ nv_wo32(priv->unk4188b4, i, 0x00000010);
+ nv_wo32(priv->unk4188b8, i, 0x00000010);
+ }
+
+ priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
+ priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
+ for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+ priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
+ priv->tpc_total += priv->tpc_nr[gpc];
+ }
+
+ switch (dev_priv->chipset) {
+ case 0xe4:
+ if (priv->tpc_total == 8)
+ priv->magic_not_rop_nr = 3;
+ else
+ if (priv->tpc_total == 7)
+ priv->magic_not_rop_nr = 1;
+ break;
+ case 0xe7:
+ priv->magic_not_rop_nr = 1;
+ break;
+ default:
+ break;
+ }
+
+ if (!priv->magic_not_rop_nr) {
+ NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
+ priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
+ priv->tpc_nr[3], priv->rop_nr);
+ priv->magic_not_rop_nr = 0x00;
+ }
+
+ NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
+ NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
+ NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
+ NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
+ NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
+ return 0;
+
+error:
+ nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
+ return ret;
+}
--- /dev/null
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NVE0_GRAPH_H__
+#define __NVE0_GRAPH_H__
+
+#define GPC_MAX 4
+#define TPC_MAX 32
+
+#define ROP_BCAST(r) (0x408800 + (r))
+#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
+#define GPC_BCAST(r) (0x418000 + (r))
+#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
+#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
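+/* Worked examples of the unit macros above, derived from their arithmetic
+ * alone: GPC_UNIT(1, 0x2608) is 0x50a608, ROP_UNIT(2, 0x144) is 0x410944
+ * and TPC_UNIT(0, 1, 0x508) is 0x504d08.
+ */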
+
+struct nve0_graph_fuc {
+ u32 *data;
+ u32 size;
+};
+
+struct nve0_graph_priv {
+ struct nouveau_exec_engine base;
+
+ struct nve0_graph_fuc fuc409c;
+ struct nve0_graph_fuc fuc409d;
+ struct nve0_graph_fuc fuc41ac;
+ struct nve0_graph_fuc fuc41ad;
+
+ u8 gpc_nr;
+ u8 rop_nr;
+ u8 tpc_nr[GPC_MAX];
+ u8 tpc_total;
+
+ u32 grctx_size;
+ u32 *grctx_vals;
+ struct nouveau_gpuobj *unk4188b4;
+ struct nouveau_gpuobj *unk4188b8;
+
+ u8 magic_not_rop_nr;
+};
+
+struct nve0_graph_chan {
+ struct nouveau_gpuobj *grctx;
+ struct nouveau_gpuobj *unk408004; /* 0x418810 too */
+ struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
+ struct nouveau_gpuobj *unk418810; /* 0x419848 too */
+ struct nouveau_gpuobj *mmio;
+ int mmio_nr;
+};
+
+int nve0_grctx_generate(struct nouveau_channel *);
+
+/* nve0_graph.c also uses this to determine supported chipsets */
+static inline u32
+nve0_graph_class(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0xe4:
+ case 0xe7:
+ return 0xa097;
+ default:
+ return 0;
+ }
+}
+
+#endif
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
+
+struct nv31_mpeg_engine {
+ struct nouveau_exec_engine base;
+ atomic_t refcount;
+};
+
+static int
+nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
+
+ if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
+ return -EBUSY;
+
+ chan->engctx[engine] = (void *)0xdeadcafe;
+ return 0;
+}
+
+static void
+nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
+ atomic_dec(&pmpeg->refcount);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ctx = NULL;
+ unsigned long flags;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &ctx);
+ if (ret)
+ return ret;
+
+ nv_wo32(ctx, 0x78, 0x02001ec1);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
+ if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
+ nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
+ nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
+ nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ chan->engctx[engine] = ctx;
+ return 0;
+}
+
+static void
+nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_gpuobj *ctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ unsigned long flags;
+ u32 inst = 0x80000000 | (ctx->pinst >> 4);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+ nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
+ if (nv_rd32(dev, 0x00b318) == inst)
+ nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
+ nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
+ nouveau_gpuobj_ref(NULL, &ctx);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 2;
+ obj->class = class;
+
+ nv_wo32(obj, 0x00, class);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static int
+nv31_mpeg_init(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
+ int i;
+
+ /* VPE init */
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
+ nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+ nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
+
+ for (i = 0; i < dev_priv->engine.fb.num_tiles; i++)
+ pmpeg->base.set_tile_region(dev, i);
+
+ /* PMPEG init */
+ nv_wr32(dev, 0x00b32c, 0x00000000);
+ nv_wr32(dev, 0x00b314, 0x00000100);
+ nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
+ nv_wr32(dev, 0x00b300, 0x02001ec1);
+ nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
+
+ nv_wr32(dev, 0x00b100, 0xffffffff);
+ nv_wr32(dev, 0x00b140, 0xffffffff);
+
+ if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ /*XXX: context save? */
+ nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x00b140, 0x00000000);
+ return 0;
+}
+
+static int
+nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+{
+ struct drm_device *dev = chan->dev;
+ u32 inst = data << 4;
+ u32 dma0 = nv_ri32(dev, inst + 0);
+ u32 dma1 = nv_ri32(dev, inst + 4);
+ u32 dma2 = nv_ri32(dev, inst + 8);
+ u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
+ u32 size = dma1 + 1;
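+ /* Purely hypothetical decode example for the lines above: dma0 0x3d002000,
+ * dma1 0x0000ffff and dma2 0x00100000 would give base 0x001003d0 and
+ * size 0x10000, and would pass the linear-object check below since
+ * bit 13 (0x2000) of dma0 is set.
+ */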
+
+ /* only allow linear DMA objects */
+ if (!(dma0 & 0x00002000))
+ return -EINVAL;
+
+ if (mthd == 0x0190) {
+ /* DMA_CMD */
+ nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
+ nv_wr32(dev, 0x00b334, base);
+ nv_wr32(dev, 0x00b324, size);
+ } else
+ if (mthd == 0x01a0) {
+ /* DMA_DATA */
+ nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
+ nv_wr32(dev, 0x00b360, base);
+ nv_wr32(dev, 0x00b364, size);
+ } else {
+ /* DMA_IMAGE, VRAM only */
+ if (dma0 & 0x000c0000)
+ return -EINVAL;
+
+ nv_wr32(dev, 0x00b370, base);
+ nv_wr32(dev, 0x00b374, size);
+ }
+
+ return 0;
+}
+
+static int
+nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ctx;
+ unsigned long flags;
+ int i;
+
+ /* hardcode drm channel id on nv3x, so swmthd lookup works */
+ if (dev_priv->card_type < NV_40)
+ return 0;
+
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (i = 0; i < pfifo->channels; i++) {
+ if (!dev_priv->channels.ptr[i])
+ continue;
+
+ ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
+ if (ctx && ctx->pinst == inst)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+ return i;
+}
+
+static void
+nv31_vpe_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
+ nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
+ nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
+}
+
+static void
+nv31_mpeg_isr(struct drm_device *dev)
+{
+ u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
+ u32 chid = nv31_mpeg_isr_chid(dev, inst);
+ u32 stat = nv_rd32(dev, 0x00b100);
+ u32 type = nv_rd32(dev, 0x00b230);
+ u32 mthd = nv_rd32(dev, 0x00b234);
+ u32 data = nv_rd32(dev, 0x00b238);
+ u32 show = stat;
+
+ if (stat & 0x01000000) {
+ /* happens on initial binding of the object */
+ if (type == 0x00000020 && mthd == 0x0000) {
+ nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
+ show &= ~0x01000000;
+ }
+
+ if (type == 0x00000010) {
+ if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
+ show &= ~0x01000000;
+ }
+ }
+
+ nv_wr32(dev, 0x00b100, stat);
+ nv_wr32(dev, 0x00b230, 0x00000001);
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ chid, inst, stat, type, mthd, data);
+ }
+}
+
+static void
+nv31_vpe_isr(struct drm_device *dev)
+{
+ if (nv_rd32(dev, 0x00b100))
+ nv31_mpeg_isr(dev);
+
+ if (nv_rd32(dev, 0x00b800)) {
+ u32 stat = nv_rd32(dev, 0x00b800);
+ NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
+ nv_wr32(dev, 0xb800, stat);
+ }
+}
+
+static void
+nv31_mpeg_destroy(struct drm_device *dev, int engine)
+{
+ struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 0);
+
+ NVOBJ_ENGINE_DEL(dev, MPEG);
+ kfree(pmpeg);
+}
+
+int
+nv31_mpeg_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv31_mpeg_engine *pmpeg;
+
+ pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
+ if (!pmpeg)
+ return -ENOMEM;
+ atomic_set(&pmpeg->refcount, 0);
+
+ pmpeg->base.destroy = nv31_mpeg_destroy;
+ pmpeg->base.init = nv31_mpeg_init;
+ pmpeg->base.fini = nv31_mpeg_fini;
+ if (dev_priv->card_type < NV_40) {
+ pmpeg->base.context_new = nv31_mpeg_context_new;
+ pmpeg->base.context_del = nv31_mpeg_context_del;
+ } else {
+ pmpeg->base.context_new = nv40_mpeg_context_new;
+ pmpeg->base.context_del = nv40_mpeg_context_del;
+ }
+ pmpeg->base.object_new = nv31_mpeg_object_new;
+
+ /* The ISR vector, PMC_ENABLE bit, and TILE regs are shared between
+ * all VPE engines; for this driver's purposes the PMPEG engine is
+ * treated as the "master" and handles the global VPE bits too.
+ */
+ pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
+ nouveau_irq_register(dev, 0, nv31_vpe_isr);
+
+ NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
+ NVOBJ_CLASS(dev, 0x3174, MPEG);
+ NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
+ NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
+ NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
+
+#if 0
+ NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
+ NVOBJ_CLASS(dev, 0x4075, ME);
+#endif
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <core/ramht.h>
+
+struct nv50_mpeg_engine {
+ struct nouveau_exec_engine base;
+};
+
+static inline u32
+CTX_PTR(struct drm_device *dev, u32 offset)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset == 0x50)
+ offset += 0x0260;
+ else
+ offset += 0x0060;
+
+ return offset;
+}
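+
+/* As a quick worked example of the helper above, CTX_PTR(dev, 0x00) is
+ * 0x0260 on chipset 0x50 and 0x0060 on the other chipsets handled here.
+ */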
+
+static int
+nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramin = chan->ramin;
+ struct nouveau_gpuobj *ctx = NULL;
+ int ret;
+
+ NV_DEBUG(dev, "ch%d\n", chan->id);
+
+ ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &ctx);
+ if (ret)
+ return ret;
+
+ nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
+ nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
+ nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
+ nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
+ nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
+ nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
+
+ nv_wo32(ctx, 0x70, 0x00801ec1);
+ nv_wo32(ctx, 0x7c, 0x0000037c);
+ dev_priv->engine.instmem.flush(dev);
+
+ chan->engctx[engine] = ctx;
+ return 0;
+}
+
+static void
+nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
+{
+ struct nouveau_gpuobj *ctx = chan->engctx[engine];
+ struct drm_device *dev = chan->dev;
+ int i;
+
+ for (i = 0x00; i <= 0x14; i += 4)
+ nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
+
+ nouveau_gpuobj_ref(NULL, &ctx);
+ chan->engctx[engine] = NULL;
+}
+
+static int
+nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
+ u32 handle, u16 class)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *obj = NULL;
+ int ret;
+
+ ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
+ if (ret)
+ return ret;
+ obj->engine = 2;
+ obj->class = class;
+
+ nv_wo32(obj, 0x00, class);
+ nv_wo32(obj, 0x04, 0x00000000);
+ nv_wo32(obj, 0x08, 0x00000000);
+ nv_wo32(obj, 0x0c, 0x00000000);
+ dev_priv->engine.instmem.flush(dev);
+
+ ret = nouveau_ramht_insert(chan, handle, obj);
+ nouveau_gpuobj_ref(NULL, &obj);
+ return ret;
+}
+
+static void
+nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
+{
+ nv50_vm_flush_engine(dev, 0x08);
+}
+
+static int
+nv50_mpeg_init(struct drm_device *dev, int engine)
+{
+ nv_wr32(dev, 0x00b32c, 0x00000000);
+ nv_wr32(dev, 0x00b314, 0x00000100);
+ nv_wr32(dev, 0x00b0e0, 0x0000001a);
+
+ nv_wr32(dev, 0x00b220, 0x00000044);
+ nv_wr32(dev, 0x00b300, 0x00801ec1);
+ nv_wr32(dev, 0x00b390, 0x00000000);
+ nv_wr32(dev, 0x00b394, 0x00000000);
+ nv_wr32(dev, 0x00b398, 0x00000000);
+ nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
+
+ nv_wr32(dev, 0x00b100, 0xffffffff);
+ nv_wr32(dev, 0x00b140, 0xffffffff);
+
+ if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
+ NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int
+nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x00b140, 0x00000000);
+ return 0;
+}
+
+static void
+nv50_mpeg_isr(struct drm_device *dev)
+{
+ u32 stat = nv_rd32(dev, 0x00b100);
+ u32 type = nv_rd32(dev, 0x00b230);
+ u32 mthd = nv_rd32(dev, 0x00b234);
+ u32 data = nv_rd32(dev, 0x00b238);
+ u32 show = stat;
+
+ if (stat & 0x01000000) {
+ /* happens on initial binding of the object */
+ if (type == 0x00000020 && mthd == 0x0000) {
+ nv_wr32(dev, 0x00b308, 0x00000100);
+ show &= ~0x01000000;
+ }
+ }
+
+ if (show && nouveau_ratelimit()) {
+ NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ stat, type, mthd, data);
+ }
+
+ nv_wr32(dev, 0x00b100, stat);
+ nv_wr32(dev, 0x00b230, 0x00000001);
+ nv50_fb_vm_trap(dev, 1);
+}
+
+static void
+nv50_vpe_isr(struct drm_device *dev)
+{
+ if (nv_rd32(dev, 0x00b100))
+ nv50_mpeg_isr(dev);
+
+ if (nv_rd32(dev, 0x00b800)) {
+ u32 stat = nv_rd32(dev, 0x00b800);
+ NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
+ nv_wr32(dev, 0xb800, stat);
+ }
+}
+
+static void
+nv50_mpeg_destroy(struct drm_device *dev, int engine)
+{
+ struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);
+
+ nouveau_irq_unregister(dev, 0);
+
+ NVOBJ_ENGINE_DEL(dev, MPEG);
+ kfree(pmpeg);
+}
+
+int
+nv50_mpeg_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_mpeg_engine *pmpeg;
+
+ pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
+ if (!pmpeg)
+ return -ENOMEM;
+
+ pmpeg->base.destroy = nv50_mpeg_destroy;
+ pmpeg->base.init = nv50_mpeg_init;
+ pmpeg->base.fini = nv50_mpeg_fini;
+ pmpeg->base.context_new = nv50_mpeg_context_new;
+ pmpeg->base.context_del = nv50_mpeg_context_del;
+ pmpeg->base.object_new = nv50_mpeg_object_new;
+ pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;
+
+ if (dev_priv->chipset == 0x50) {
+ nouveau_irq_register(dev, 0, nv50_vpe_isr);
+ NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
+ NVOBJ_CLASS(dev, 0x3174, MPEG);
+#if 0
+ NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
+ NVOBJ_CLASS(dev, 0x4075, ME);
+#endif
+ } else {
+ nouveau_irq_register(dev, 0, nv50_mpeg_isr);
+ NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
+ NVOBJ_CLASS(dev, 0x8274, MPEG);
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include <subdev/vm.h>
+#include <core/ramht.h>
+
+struct nv98_ppp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00000002))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+ return 0;
+}
+
+static int
+nv98_ppp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
+ return 0;
+}
+
+static void
+nv98_ppp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, PPP);
+
+ kfree(pppp);
+}
+
+int
+nv98_ppp_create(struct drm_device *dev)
+{
+ struct nv98_ppp_engine *pppp;
+
+ pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
+ if (!pppp)
+ return -ENOMEM;
+
+ pppp->base.destroy = nv98_ppp_destroy;
+ pppp->base.init = nv98_ppp_init;
+ pppp->base.fini = nv98_ppp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_util.h"
+#include <subdev/vm.h>
+#include <core/ramht.h>
+
+/*XXX: This stub is currently used on NV98+ as well; as soon as it becomes
+ * more than just an enable/disable stub it needs to be split out into
+ * nv98_vp.c...
+ */
+
+struct nv84_vp_engine {
+ struct nouveau_exec_engine base;
+};
+
+static int
+nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
+{
+ if (!(nv_rd32(dev, 0x000200) & 0x00020000))
+ return 0;
+
+ nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+ return 0;
+}
+
+static int
+nv84_vp_init(struct drm_device *dev, int engine)
+{
+ nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
+ nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
+ return 0;
+}
+
+static void
+nv84_vp_destroy(struct drm_device *dev, int engine)
+{
+ struct nv84_vp_engine *pvp = nv_engine(dev, engine);
+
+ NVOBJ_ENGINE_DEL(dev, VP);
+
+ kfree(pvp);
+}
+
+int
+nv84_vp_create(struct drm_device *dev)
+{
+ struct nv84_vp_engine *pvp;
+
+ pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
+ if (!pvp)
+ return -ENOMEM;
+
+ pvp->base.destroy = nv84_vp_destroy;
+ pvp->base.init = nv84_vp_init;
+ pvp->base.fini = nv84_vp_fini;
+
+ NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_REGION_H__
+#define __NOUVEAU_REGION_H__
+
+struct nouveau_mm_node {
+ struct list_head nl_entry;
+ struct list_head fl_entry;
+ struct list_head rl_entry;
+
+ u8 type;
+ u32 offset;
+ u32 length;
+};
+
+struct nouveau_mm {
+ struct list_head nodes;
+ struct list_head free;
+
+ struct mutex mutex;
+
+ u32 block_size;
+ int heap_nodes;
+};
+
+int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
+int nouveau_mm_fini(struct nouveau_mm *);
+int nouveau_mm_pre(struct nouveau_mm *);
+int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
+ u32 align, struct nouveau_mm_node **);
+void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
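+
+/* A minimal usage sketch of this allocator API, assuming only the
+ * prototypes above and the usual 0-on-success kernel convention; length,
+ * block, type, size and align are placeholders, and the units of
+ * offset/length/block are whatever the nouveau_mm implementation defines:
+ *
+ *	struct nouveau_mm mm;
+ *	struct nouveau_mm_node *node = NULL;
+ *
+ *	nouveau_mm_init(&mm, 0, length, block);
+ *	if (!nouveau_mm_get(&mm, type, size, 0, align, &node)) {
+ *		... use node->offset and node->length ...
+ *		nouveau_mm_put(&mm, node);
+ *	}
+ *	nouveau_mm_fini(&mm);
+ */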
+
+#endif
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_RAMHT_H__
+#define __NOUVEAU_RAMHT_H__
+
+struct nouveau_ramht_entry {
+ struct list_head head;
+ struct nouveau_channel *channel;
+ struct nouveau_gpuobj *gpuobj;
+ u32 handle;
+};
+
+struct nouveau_ramht {
+ struct drm_device *dev;
+ struct kref refcount;
+ spinlock_t lock;
+ struct nouveau_gpuobj *gpuobj;
+ struct list_head entries;
+ int bits;
+};
+
+extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
+ struct nouveau_ramht **);
+extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
+ struct nouveau_channel *unref_channel);
+
+extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
+ struct nouveau_gpuobj *);
+extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
+
+#endif
--- /dev/null
+#ifndef __NOUVEAU_FIFO_H__
+#define __NOUVEAU_FIFO_H__
+
+struct nouveau_fifo_priv {
+ struct nouveau_exec_engine base;
+ u32 channels;
+};
+
+struct nouveau_fifo_chan {
+};
+
+bool nv04_fifo_cache_pull(struct drm_device *, bool);
+void nv04_fifo_context_del(struct nouveau_channel *, int);
+int nv04_fifo_fini(struct drm_device *, int, bool);
+int nv04_fifo_init(struct drm_device *, int);
+void nv04_fifo_isr(struct drm_device *);
+void nv04_fifo_destroy(struct drm_device *, int);
+
+void nv50_fifo_playlist_update(struct drm_device *);
+void nv50_fifo_destroy(struct drm_device *, int);
+void nv50_fifo_tlb_flush(struct drm_device *, int);
+
+int nv04_fifo_create(struct drm_device *);
+int nv10_fifo_create(struct drm_device *);
+int nv17_fifo_create(struct drm_device *);
+int nv40_fifo_create(struct drm_device *);
+int nv50_fifo_create(struct drm_device *);
+int nv84_fifo_create(struct drm_device *);
+int nvc0_fifo_create(struct drm_device *);
+int nve0_fifo_create(struct drm_device *);
+
+#endif
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_GPIO_H__
+#define __NOUVEAU_GPIO_H__
+
+struct gpio_func {
+ u8 func;
+ u8 line;
+ u8 log[2];
+};
+
+/* nouveau_gpio.c */
+int nouveau_gpio_create(struct drm_device *);
+void nouveau_gpio_destroy(struct drm_device *);
+int nouveau_gpio_init(struct drm_device *);
+void nouveau_gpio_fini(struct drm_device *);
+void nouveau_gpio_reset(struct drm_device *);
+int nouveau_gpio_drive(struct drm_device *, int idx, int line,
+ int dir, int out);
+int nouveau_gpio_sense(struct drm_device *, int idx, int line);
+int nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
+ struct gpio_func *);
+int nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
+int nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
+int nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
+void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
+int nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
+ void (*)(void *, int state), void *data);
+void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
+ void (*)(void *, int state), void *data);
+
+static inline bool
+nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
+{
+ struct gpio_func func;
+ return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
+}
+
+static inline int
+nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
+{
+ return nouveau_gpio_set(dev, 0, tag, 0xff, state);
+}
+
+static inline int
+nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
+{
+ return nouveau_gpio_get(dev, 0, tag, 0xff);
+}
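+
+/* Hypothetical usage sketch of the tag-based helpers above; the tag would
+ * typically come from the VBIOS gpio table and is illustrative only:
+ *
+ *	if (nouveau_gpio_func_valid(dev, tag))
+ *		nouveau_gpio_func_set(dev, tag, 1);
+ */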
+
+#endif
--- /dev/null
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __NOUVEAU_I2C_H__
+#define __NOUVEAU_I2C_H__
+
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include "drm_dp_helper.h"
+
+#define NV_I2C_PORT(n) (0x00 + (n))
+#define NV_I2C_PORT_NUM 0x10
+#define NV_I2C_DEFAULT(n) (0x80 + (n))
+
+struct nouveau_i2c_chan {
+ struct i2c_adapter adapter;
+ struct drm_device *dev;
+ struct i2c_algo_bit_data bit;
+ struct list_head head;
+ u8 index;
+ u8 type;
+ u32 dcb;
+ u32 drive;
+ u32 sense;
+ u32 state;
+};
+
+int nouveau_i2c_init(struct drm_device *);
+void nouveau_i2c_fini(struct drm_device *);
+struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
+bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
+int nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ struct i2c_board_info *info,
+ bool (*match)(struct nouveau_i2c_chan *,
+ struct i2c_board_info *),
+ int index);
+
+extern const struct i2c_algorithm nouveau_dp_i2c_algo;
+
+#endif /* __NOUVEAU_I2C_H__ */
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_VM_H__
+#define __NOUVEAU_VM_H__
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include <core/mm.h>
+
+struct nouveau_vm_pgt {
+ struct nouveau_gpuobj *obj[2];
+ u32 refcount[2];
+};
+
+struct nouveau_vm_pgd {
+ struct list_head head;
+ struct nouveau_gpuobj *obj;
+};
+
+struct nouveau_vma {
+ struct list_head head;
+ int refcount;
+ struct nouveau_vm *vm;
+ struct nouveau_mm_node *node;
+ u64 offset;
+ u32 access;
+};
+
+struct nouveau_vm {
+ struct drm_device *dev;
+ struct nouveau_mm mm;
+ int refcount;
+
+ struct list_head pgd_list;
+ atomic_t engref[16];
+
+ struct nouveau_vm_pgt *pgt;
+ u32 fpde;
+ u32 lpde;
+
+ u32 pgt_bits;
+ u8 spg_shift;
+ u8 lpg_shift;
+
+ void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+ void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt,
+ u64 phys, u64 delta);
+ void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
+
+ void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
+ void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
+ void (*flush)(struct nouveau_vm *);
+};
+
+/* nouveau_vm.c */
+int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
+ struct nouveau_vm **);
+int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
+ struct nouveau_gpuobj *pgd);
+int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
+ u32 access, struct nouveau_vma *);
+void nouveau_vm_put(struct nouveau_vma *);
+void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
+void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
+void nouveau_vm_unmap(struct nouveau_vma *);
+void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
+void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
+ struct nouveau_mem *);
+void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+ struct nouveau_mem *mem);
+/* nv50_vm.c */
+void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
+void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
+void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nv50_vm_flush(struct nouveau_vm *);
+void nv50_vm_flush_engine(struct drm_device *, int engine);
+
+/* nvc0_vm.c */
+void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2]);
+void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
+void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
+ struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
+void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
+void nvc0_vm_flush(struct nouveau_vm *);
+
+#endif
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
+
+ if (boot0 & 0x00000100) {
+ dev_priv->vram_size = ((boot0 >> 12) & 0xf) * 2 + 2;
+ dev_priv->vram_size *= 1024 * 1024;
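+ /* e.g. a nibble value of 0x3 here works out to (3 * 2 + 2) = 8MiB;
+ * purely a worked example of the formula above.
+ */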
+ } else {
+ switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
+ dev_priv->vram_size = 32 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
+ dev_priv->vram_size = 16 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
+ dev_priv->vram_size = 8 * 1024 * 1024;
+ break;
+ case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
+ dev_priv->vram_size = 4 * 1024 * 1024;
+ break;
+ }
+ }
+
+ if ((boot0 & 0x00000038) <= 0x10)
+ dev_priv->vram_type = NV_MEM_TYPE_SGRAM;
+ else
+ dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
+
+ return 0;
+}
+
+int
+nv04_fb_init(struct drm_device *dev)
+{
+ /* This is what the DDX did for NV_ARCH_04, but an mmio trace shows
+ * nvidia reading PFB_CFG_0 and then writing back its original value
+ * (which was 0x701114 in this case).
+ */
+
+ nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
+ return 0;
+}
+
+void
+nv04_fb_takedown(struct drm_device *dev)
+{
+}
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+void
+nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = 0x80000000 | addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+void
+nv10_fb_free_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+}
+
+void
+nv10_fb_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+}
+
+int
+nv1a_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t mem, mib;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
+ if (!bridge) {
+ NV_ERROR(dev, "no bridge device\n");
+ return 0;
+ }
+
+ if (dev_priv->chipset == 0x1a) {
+ pci_read_config_dword(bridge, 0x7c, &mem);
+ mib = ((mem >> 6) & 31) + 1;
+ } else {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ mib = ((mem >> 4) & 127) + 1;
+ }
+
+ dev_priv->vram_size = mib * 1024 * 1024;
+ return 0;
+}
+
+int
+nv10_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ u32 cfg0 = nv_rd32(dev, 0x100200);
+
+ dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
+
+ if (cfg0 & 0x00000001)
+ dev_priv->vram_type = NV_MEM_TYPE_DDR1;
+ else
+ dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
+
+ return 0;
+}
+
+int
+nv10_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i;
+
+ /* Turn all the tiling regions off. */
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_tile_region(dev, i);
+
+ return 0;
+}
+
+void
+nv10_fb_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i;
+
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->free_tile_region(dev, i);
+}
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+static struct drm_mm_node *
+nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct drm_mm_node *mem;
+ int ret;
+
+ ret = drm_mm_pre_get(&pfb->tag_heap);
+ if (ret)
+ return NULL;
+
+ spin_lock(&dev_priv->tile.lock);
+ mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
+ if (mem)
+ mem = drm_mm_get_block_atomic(mem, size, 0);
+ spin_unlock(&dev_priv->tile.lock);
+
+ return mem;
+}
+
+static void
+nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_mm_node *mem = *pmem;
+ if (mem) {
+ spin_lock(&dev_priv->tile.lock);
+ drm_mm_put_block(mem);
+ spin_unlock(&dev_priv->tile.lock);
+ *pmem = NULL;
+ }
+}
+
+void
+nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
+
+ tile->addr = 0x00000001 | addr;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+
+ /* Allocate some of the on-die tag memory, used to store Z
+ * compression meta-data (most likely just a bitmap determining
+ * if a given tile is compressed or not).
+ */
+ if (flags & NOUVEAU_GEM_TILE_ZETA) {
+ tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
+ if (tile->tag_mem) {
+ /* Enable Z compression */
+ tile->zcomp = tile->tag_mem->start;
+ if (dev_priv->chipset >= 0x25) {
+ if (bpp == 16)
+ tile->zcomp |= NV25_PFB_ZCOMP_MODE_16;
+ else
+ tile->zcomp |= NV25_PFB_ZCOMP_MODE_32;
+ } else {
+ tile->zcomp |= NV20_PFB_ZCOMP_EN;
+ if (bpp != 16)
+ tile->zcomp |= NV20_PFB_ZCOMP_MODE_32;
+ }
+ }
+
+ tile->addr |= 2;
+ }
+}
+
+void
+nv20_fb_free_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
+ nv20_fb_free_tag(dev, &tile->tag_mem);
+}
+
+void
+nv20_fb_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+ nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
+}
+
+int
+nv20_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 mem_size = nv_rd32(dev, 0x10020c);
+ u32 pbus1218 = nv_rd32(dev, 0x001218);
+
+ dev_priv->vram_size = mem_size & 0xff000000;
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break;
+ }
+
+ return 0;
+}
+
+int
+nv20_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i;
+
+ if (dev_priv->chipset >= 0x25)
+ drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
+ else
+ drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);
+
+ /* Turn all the tiling regions off. */
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_tile_region(dev, i);
+
+ return 0;
+}
+
+void
+nv20_fb_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i;
+
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->free_tile_region(dev, i);
+
+ drm_mm_takedown(&pfb->tag_heap);
+}
--- /dev/null
+/*
+ * Copyright (C) 2010 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+void
+nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
+ uint32_t size, uint32_t pitch, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = addr | 1;
+ tile->limit = max(1u, addr + size) - 1;
+ tile->pitch = pitch;
+}
+
+void
+nv30_fb_free_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ tile->addr = tile->limit = tile->pitch = 0;
+}
+
+static int
+calc_bias(struct drm_device *dev, int k, int i, int j)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int b = (dev_priv->chipset > 0x30 ?
+ nv_rd32(dev, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
+ 0) & 0xf;
+
+ return 2 * (b & 0x8 ? b - 0x10 : b);
+}
+
+static int
+calc_ref(struct drm_device *dev, int l, int k, int i)
+{
+ int j, x = 0;
+
+ for (j = 0; j < 4; j++) {
+ int m = (l >> (8 * i) & 0xff) + calc_bias(dev, k, i, j);
+
+ x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
+ }
+
+ return x;
+}
+
+int
+nv30_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int i, j;
+
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_tile_region(dev, i);
+
+ /* Init the memory timing regs at 0x10037c/0x1003ac */
+ if (dev_priv->chipset == 0x30 ||
+ dev_priv->chipset == 0x31 ||
+ dev_priv->chipset == 0x35) {
+ /* Related to ROP count */
+ int n = (dev_priv->chipset == 0x31 ? 2 : 4);
+ int l = nv_rd32(dev, 0x1003d0);
+
+ for (i = 0; i < n; i++) {
+ for (j = 0; j < 3; j++)
+ nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j,
+ calc_ref(dev, l, 0, j));
+
+ for (j = 0; j < 2; j++)
+ nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j,
+ calc_ref(dev, l, 1, j));
+ }
+ }
+
+ return 0;
+}
+
+void
+nv30_fb_takedown(struct drm_device *dev)
+{
+}
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+void
+nv40_fb_set_tile_region(struct drm_device *dev, int i)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
+ break;
+
+ default:
+ nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
+ nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
+ nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
+ break;
+ }
+}
+
+static void
+nv40_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+ nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
+ nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
+ nv_wr32(dev, 0x100820, 0x00000000);
+}
+
+static void
+nv44_fb_init_gart(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
+ u32 vinst;
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100800, 0x00000001);
+ return;
+ }
+
+ /* Calculate the VRAM address of this PRAMIN block; the object
+ * must be allocated with 512KiB alignment and must not exceed
+ * a total size of 512KiB for this to work correctly.
+ */
+ vinst = nv_rd32(dev, 0x10020c);
+ vinst -= ((gart->pinst >> 19) + 1) << 19;
+
+ nv_wr32(dev, 0x100850, 0x80000000);
+ nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
+
+ nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
+ nv_wr32(dev, 0x100850, 0x00008000);
+ nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
+ nv_wr32(dev, 0x100820, 0x00000000);
+ nv_wr32(dev, 0x10082c, 0x00000001);
+ nv_wr32(dev, 0x100800, vinst | 0x00000010);
+}
+
+int
+nv40_fb_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* 0x001218 is actually present on a few other NV4X I looked at,
+ * and even contains sane values matching 0x100474. From looking
+ * at various vbios images however, this isn't the case everywhere.
+ * So, I chose to use the same regs I've seen NVIDIA reading around
+ * the memory detection, hopefully that'll get us the right numbers
+ */
+ if (dev_priv->chipset == 0x40) {
+ u32 pbus1218 = nv_rd32(dev, 0x001218);
+ switch (pbus1218 & 0x00000300) {
+ case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
+ case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
+ }
+ } else
+ if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
+ u32 pfb914 = nv_rd32(dev, 0x100914);
+ switch (pfb914 & 0x00000003) {
+ case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
+ case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
+ case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
+ case 0x00000003: break;
+ }
+ } else
+ if (dev_priv->chipset != 0x4e) {
+ u32 pfb474 = nv_rd32(dev, 0x100474);
+ if (pfb474 & 0x00000004)
+ dev_priv->vram_type = NV_MEM_TYPE_GDDR3;
+ if (pfb474 & 0x00000002)
+ dev_priv->vram_type = NV_MEM_TYPE_DDR2;
+ if (pfb474 & 0x00000001)
+ dev_priv->vram_type = NV_MEM_TYPE_DDR1;
+ } else {
+ dev_priv->vram_type = NV_MEM_TYPE_STOLEN;
+ }
+
+ dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000;
+ return 0;
+}
+
+int
+nv40_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ uint32_t tmp;
+ int i;
+
+ if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
+ if (nv44_graph_class(dev))
+ nv44_fb_init_gart(dev);
+ else
+ nv40_fb_init_gart(dev);
+ }
+
+ switch (dev_priv->chipset) {
+ case 0x40:
+ case 0x45:
+ tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
+ nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
+ pfb->num_tiles = NV10_PFB_TILE__SIZE;
+ break;
+ case 0x46: /* G72 */
+ case 0x47: /* G70 */
+ case 0x49: /* G71 */
+ case 0x4b: /* G73 */
+ case 0x4c: /* C51 (G7X version) */
+ pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
+ break;
+ default:
+ pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
+ break;
+ }
+
+ /* Turn all the tiling regions off. */
+ for (i = 0; i < pfb->num_tiles; i++)
+ pfb->set_tile_region(dev, i);
+
+ return 0;
+}
+
+void
+nv40_fb_takedown(struct drm_device *dev)
+{
+}
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include <engine/fifo.h>
+
+struct nv50_fb_priv {
+ struct page *r100c08_page;
+ dma_addr_t r100c08;
+};
+
+static void
+nv50_fb_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nv50_fb_priv *priv = pfb->priv;
+
+ if (drm_mm_initialized(&pfb->tag_heap))
+ drm_mm_takedown(&pfb->tag_heap);
+
+ if (priv->r100c08_page) {
+ pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c08_page);
+ }
+
+ kfree(priv);
+ pfb->priv = NULL;
+}
+
+static int
+nv50_fb_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nv50_fb_priv *priv;
+ u32 tagmem;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pfb->priv = priv;
+
+ priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!priv->r100c08_page) {
+ nv50_fb_destroy(dev);
+ return -ENOMEM;
+ }
+
+ priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
+ nv50_fb_destroy(dev);
+ return -EFAULT;
+ }
+
+ tagmem = nv_rd32(dev, 0x100320);
+ NV_DEBUG(dev, "%d tags available\n", tagmem);
+ ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
+ if (ret) {
+ nv50_fb_destroy(dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nv50_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_fb_priv *priv;
+ int ret;
+
+ if (!dev_priv->engine.fb.priv) {
+ ret = nv50_fb_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = dev_priv->engine.fb.priv;
+
+ /* Not a clue what this is exactly. Without pointing it at a
+ * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
+ * cause IOMMU "read from address 0" errors (rh#561267)
+ */
+ nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
+
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
+ switch (dev_priv->chipset) {
+ case 0x50:
+ nv_wr32(dev, 0x100c90, 0x000707ff);
+ break;
+ case 0xa3:
+ case 0xa5:
+ case 0xa8:
+ nv_wr32(dev, 0x100c90, 0x000d0fff);
+ break;
+ case 0xaf:
+ nv_wr32(dev, 0x100c90, 0x089d1fff);
+ break;
+ default:
+ nv_wr32(dev, 0x100c90, 0x001d07ff);
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv50_fb_takedown(struct drm_device *dev)
+{
+ nv50_fb_destroy(dev);
+}
+
+static struct nouveau_enum vm_dispatch_subclients[] = {
+ { 0x00000000, "GRCTX", NULL },
+ { 0x00000001, "NOTIFY", NULL },
+ { 0x00000002, "QUERY", NULL },
+ { 0x00000003, "COND", NULL },
+ { 0x00000004, "M2M_IN", NULL },
+ { 0x00000005, "M2M_OUT", NULL },
+ { 0x00000006, "M2M_NOTIFY", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_ccache_subclients[] = {
+ { 0x00000000, "CB", NULL },
+ { 0x00000001, "TIC", NULL },
+ { 0x00000002, "TSC", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_prop_subclients[] = {
+ { 0x00000000, "RT0", NULL },
+ { 0x00000001, "RT1", NULL },
+ { 0x00000002, "RT2", NULL },
+ { 0x00000003, "RT3", NULL },
+ { 0x00000004, "RT4", NULL },
+ { 0x00000005, "RT5", NULL },
+ { 0x00000006, "RT6", NULL },
+ { 0x00000007, "RT7", NULL },
+ { 0x00000008, "ZETA", NULL },
+ { 0x00000009, "LOCAL", NULL },
+ { 0x0000000a, "GLOBAL", NULL },
+ { 0x0000000b, "STACK", NULL },
+ { 0x0000000c, "DST2D", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_pfifo_subclients[] = {
+ { 0x00000000, "PUSHBUF", NULL },
+ { 0x00000001, "SEMAPHORE", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_bar_subclients[] = {
+ { 0x00000000, "FB", NULL },
+ { 0x00000001, "IN", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_client[] = {
+ { 0x00000000, "STRMOUT", NULL },
+ { 0x00000003, "DISPATCH", vm_dispatch_subclients },
+ { 0x00000004, "PFIFO_WRITE", NULL },
+ { 0x00000005, "CCACHE", vm_ccache_subclients },
+ { 0x00000006, "PPPP", NULL },
+ { 0x00000007, "CLIPID", NULL },
+ { 0x00000008, "PFIFO_READ", NULL },
+ { 0x00000009, "VFETCH", NULL },
+ { 0x0000000a, "TEXTURE", NULL },
+ { 0x0000000b, "PROP", vm_prop_subclients },
+ { 0x0000000c, "PVP", NULL },
+ { 0x0000000d, "PBSP", NULL },
+ { 0x0000000e, "PCRYPT", NULL },
+ { 0x0000000f, "PCOUNTER", NULL },
+ { 0x00000011, "PDAEMON", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_engine[] = {
+ { 0x00000000, "PGRAPH", NULL },
+ { 0x00000001, "PVP", NULL },
+ { 0x00000004, "PEEPHOLE", NULL },
+ { 0x00000005, "PFIFO", vm_pfifo_subclients },
+ { 0x00000006, "BAR", vm_bar_subclients },
+ { 0x00000008, "PPPP", NULL },
+ { 0x00000009, "PBSP", NULL },
+ { 0x0000000a, "PCRYPT", NULL },
+ { 0x0000000b, "PCOUNTER", NULL },
+ { 0x0000000c, "SEMAPHORE_BG", NULL },
+ { 0x0000000d, "PCOPY", NULL },
+ { 0x0000000e, "PDAEMON", NULL },
+ {}
+};
+
+static struct nouveau_enum vm_fault[] = {
+ { 0x00000000, "PT_NOT_PRESENT", NULL },
+ { 0x00000001, "PT_TOO_SHORT", NULL },
+ { 0x00000002, "PAGE_NOT_PRESENT", NULL },
+ { 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
+ { 0x00000004, "PAGE_READ_ONLY", NULL },
+ { 0x00000006, "NULL_DMAOBJ", NULL },
+ { 0x00000007, "WRONG_MEMTYPE", NULL },
+ { 0x0000000b, "VRAM_LIMIT", NULL },
+ { 0x0000000f, "DMAOBJ_LIMIT", NULL },
+ {}
+};
+
+void
+nv50_fb_vm_trap(struct drm_device *dev, int display)
+{
+ struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ const struct nouveau_enum *en, *cl;
+ unsigned long flags;
+ u32 trap[6], idx, chinst;
+ u8 st0, st1, st2, st3;
+ int i, ch;
+
+ idx = nv_rd32(dev, 0x100c90);
+ if (!(idx & 0x80000000))
+ return;
+ idx &= 0x00ffffff;
+
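+	/* read the six words of trap information through the indirect
+	 * 0x100c90/0x100c94 window, then write the valid bit back, which
+	 * appears to acknowledge the trap and re-arm reporting
+	 */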
+ for (i = 0; i < 6; i++) {
+ nv_wr32(dev, 0x100c90, idx | i << 24);
+ trap[i] = nv_rd32(dev, 0x100c94);
+ }
+ nv_wr32(dev, 0x100c90, idx | 0x80000000);
+
+ if (!display)
+ return;
+
+ /* lookup channel id */
+ chinst = (trap[2] << 16) | trap[1];
+ spin_lock_irqsave(&dev_priv->channels.lock, flags);
+ for (ch = 0; ch < pfifo->channels; ch++) {
+ struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
+
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (chinst == chan->ramin->vinst >> 12)
+ break;
+ }
+ spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
+
+ /* decode status bits into something more useful */
+ if (dev_priv->chipset < 0xa3 ||
+ dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+ st0 = (trap[0] & 0x0000000f) >> 0;
+ st1 = (trap[0] & 0x000000f0) >> 4;
+ st2 = (trap[0] & 0x00000f00) >> 8;
+ st3 = (trap[0] & 0x0000f000) >> 12;
+ } else {
+ st0 = (trap[0] & 0x000000ff) >> 0;
+ st1 = (trap[0] & 0x0000ff00) >> 8;
+ st2 = (trap[0] & 0x00ff0000) >> 16;
+ st3 = (trap[0] & 0xff000000) >> 24;
+ }
+
+ NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
+ (trap[5] & 0x00000100) ? "read" : "write",
+ trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);
+
+ en = nouveau_enum_find(vm_engine, st0);
+ if (en)
+ printk("%s/", en->name);
+ else
+ printk("%02x/", st0);
+
+ cl = nouveau_enum_find(vm_client, st2);
+ if (cl)
+ printk("%s/", cl->name);
+ else
+ printk("%02x/", st2);
+
+ if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
+ else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
+ else cl = NULL;
+ if (cl)
+ printk("%s", cl->name);
+ else
+ printk("%02x", st3);
+
+ printk(" reason: ");
+ en = nouveau_enum_find(vm_fault, st1);
+ if (en)
+ printk("%s\n", en->name);
+ else
+ printk("0x%08x\n", st1);
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <core/mm.h>
+
+static int types[0x80] = {
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
+ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
+ 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
+};
+
+bool
+nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+
+ if (likely(type < ARRAY_SIZE(types) && types[type]))
+ return true;
+ return false;
+}
+
+void
+nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
+ struct nouveau_mm_node *this;
+ struct nouveau_mem *mem;
+
+ mem = *pmem;
+ *pmem = NULL;
+ if (unlikely(mem == NULL))
+ return;
+
+ mutex_lock(&mm->mutex);
+ while (!list_empty(&mem->regions)) {
+ this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+
+ list_del(&this->rl_entry);
+ nouveau_mm_put(mm, this);
+ }
+
+ if (mem->tag) {
+ drm_mm_put_block(mem->tag);
+ mem->tag = NULL;
+ }
+ mutex_unlock(&mm->mutex);
+
+ kfree(mem);
+}
+
+int
+nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
+ u32 memtype, struct nouveau_mem **pmem)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
+ struct nouveau_mm_node *r;
+ struct nouveau_mem *mem;
+ int comp = (memtype & 0x300) >> 8;
+ int type = (memtype & 0x07f);
+ int ret;
+
+ if (!types[type])
+ return -EINVAL;
+ size >>= 12;
+ align >>= 12;
+ size_nc >>= 12;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ mutex_lock(&mm->mutex);
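+	/* compressed memory types need backing storage from the PFB tag
+	 * heap; if none can be reserved, fall back to the equivalent
+	 * uncompressed type
+	 */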
+ if (comp) {
+ if (align == 16) {
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ int n = (size >> 4) * comp;
+
+ mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
+ if (mem->tag)
+ mem->tag = drm_mm_get_block(mem->tag, n, 0);
+ }
+
+ if (unlikely(!mem->tag))
+ comp = 0;
+ }
+
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (comp << 7) | type;
+ mem->size = size;
+
+ do {
+ ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &mem);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &mem->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
+ return 0;
+}
+
+static u32
+nv50_vram_rblock(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru, rblock_size;
+
+ r0 = nv_rd32(dev, 0x100200);
+ r4 = nv_rd32(dev, 0x100204);
+ rt = nv_rd32(dev, 0x100250);
+ ru = nv_rd32(dev, 0x001540);
+ NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
+
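+	/* bytes per DRAM row: enabled partitions * banks * 2^colbits * 8
+	 * (presumably bytes per access); the predicted total adds the
+	 * second rank's rows when 0x100200 bit 2 is set
+	 */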
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
+
+ if (predicted != dev_priv->vram_size) {
+ NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+ (u32)(dev_priv->vram_size >> 20));
+ NV_WARN(dev, "we calculated %dMiB VRAM\n",
+ (u32)(predicted >> 20));
+ }
+
+ rblock_size = rowsize;
+ if (rt & 1)
+ rblock_size *= 3;
+
+ NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
+ return rblock_size;
+}
+
+int
+nv50_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 pfb714 = nv_rd32(dev, 0x100714);
+ u32 rblock, length;
+
+ switch (pfb714 & 0x00000007) {
+ case 0: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
+ case 1:
+ if (nouveau_mem_vbios_type(dev) == NV_MEM_TYPE_DDR3)
+ dev_priv->vram_type = NV_MEM_TYPE_DDR3;
+ else
+ dev_priv->vram_type = NV_MEM_TYPE_DDR2;
+ break;
+ case 2: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
+ case 3: dev_priv->vram_type = NV_MEM_TYPE_GDDR4; break;
+ case 4: dev_priv->vram_type = NV_MEM_TYPE_GDDR5; break;
+ default:
+ break;
+ }
+
+ dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x100200) & 0x4);
+ dev_priv->vram_size = nv_rd32(dev, 0x10020c);
+ dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
+ dev_priv->vram_size &= 0xffffffff00ULL;
+
+ /* IGPs, no funky reordering happens here, they don't have VRAM */
+ if (dev_priv->chipset == 0xaa ||
+ dev_priv->chipset == 0xac ||
+ dev_priv->chipset == 0xaf) {
+ dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
+ rblock = 4096 >> 12;
+ } else {
+ rblock = nv50_vram_rblock(dev) >> 12;
+ }
+
+ length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
+
+ return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
+}
+
+void
+nv50_vram_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+
+ nouveau_mm_fini(&vram->mm);
+}
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+struct nvc0_fb_priv {
+ struct page *r100c10_page;
+ dma_addr_t r100c10;
+};
+
+static inline void
+nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp)
+{
+ u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
+ u32 stat = nv_rd32(dev, subp_base + 0x020);
+
+ if (stat) {
+ NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat);
+ nv_wr32(dev, subp_base + 0x020, stat);
+ }
+}
+
+static void
+nvc0_mfb_isr(struct drm_device *dev)
+{
+ u32 units = nv_rd32(dev, 0x00017c);
+ while (units) {
+ u32 subp, unit = ffs(units) - 1;
+ for (subp = 0; subp < 2; subp++)
+ nvc0_mfb_subp_isr(dev, unit, subp);
+ units &= ~(1 << unit);
+ }
+
+ /* we do something horribly wrong and upset PMFB a lot, so mask off
+ * interrupts from it after the first one until it's fixed
+ */
+ nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
+}
+
+static void
+nvc0_fb_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nvc0_fb_priv *priv = pfb->priv;
+
+ nouveau_irq_unregister(dev, 25);
+
+ if (priv->r100c10_page) {
+ pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ __free_page(priv->r100c10_page);
+ }
+
+ kfree(priv);
+ pfb->priv = NULL;
+}
+
+static int
+nvc0_fb_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+ struct nvc0_fb_priv *priv;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pfb->priv = priv;
+
+ priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!priv->r100c10_page) {
+ nvc0_fb_destroy(dev);
+ return -ENOMEM;
+ }
+
+ priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
+ nvc0_fb_destroy(dev);
+ return -EFAULT;
+ }
+
+ nouveau_irq_register(dev, 25, nvc0_mfb_isr);
+ return 0;
+}
+
+int
+nvc0_fb_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_fb_priv *priv;
+ int ret;
+
+ if (!dev_priv->engine.fb.priv) {
+ ret = nvc0_fb_create(dev);
+ if (ret)
+ return ret;
+ }
+ priv = dev_priv->engine.fb.priv;
+
+ nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
+ nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */
+ return 0;
+}
+
+void
+nvc0_fb_takedown(struct drm_device *dev)
+{
+ nvc0_fb_destroy(dev);
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <core/mm.h>
+
+/* 0 = unsupported
+ * 1 = non-compressed
+ * 3 = compressed
+ */
+static const u8 types[256] = {
+ 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
+ 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
+ 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
+ 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
+ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
+};
+
+bool
+nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
+ return likely((types[memtype] == 1));
+}
+
+int
+nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
+ u32 type, struct nouveau_mem **pmem)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
+ struct nouveau_mm_node *r;
+ struct nouveau_mem *mem;
+ int ret;
+
+ size >>= 12;
+ align >>= 12;
+ ncmin >>= 12;
+
+ mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+ if (!mem)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&mem->regions);
+ mem->dev = dev_priv->dev;
+ mem->memtype = (type & 0xff);
+ mem->size = size;
+
+ mutex_lock(&mm->mutex);
+ do {
+ ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &mem);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &mem->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
+ r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
+ mem->offset = (u64)r->offset << 12;
+ *pmem = mem;
+ return 0;
+}
+
+int
+nvc0_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
+ const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 parts = nv_rd32(dev, 0x022438);
+ u32 pmask = nv_rd32(dev, 0x022554);
+ u32 bsize = nv_rd32(dev, 0x10f20c);
+ u32 offset, length;
+ bool uniform = true;
+ int ret, part;
+
+ NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
+ NV_DEBUG(dev, "parts 0x%08x mask 0x%08x\n", parts, pmask);
+
+ dev_priv->vram_type = nouveau_mem_vbios_type(dev);
+ dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x10f200) & 0x00000004);
+
+ /* read amount of vram attached to each memory controller */
+ for (part = 0; part < parts; part++) {
+ if (!(pmask & (1 << part))) {
+ u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000));
+ if (psize != bsize) {
+ if (psize < bsize)
+ bsize = psize;
+ uniform = false;
+ }
+
+ NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
+ dev_priv->vram_size += (u64)psize << 20;
+ }
+ }
+
+ /* if all controllers have the same amount attached, there's no holes */
+ if (uniform) {
+ offset = rsvd_head;
+ length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
+ return nouveau_mm_init(&vram->mm, offset, length, 1);
+ }
+
+ /* otherwise, address lowest common amount from 0GiB */
+ ret = nouveau_mm_init(&vram->mm, rsvd_head, (bsize << 8) * parts, 1);
+ if (ret)
+ return ret;
+
+ /* and the rest starting from (8GiB + common_size) */
+ offset = (0x0200000000ULL >> 12) + (bsize << 8);
+ length = (dev_priv->vram_size >> 12) - (bsize << 8) - rsvd_tail;
+
+ ret = nouveau_mm_init(&vram->mm, offset, length, 0);
+ if (ret) {
+ nouveau_mm_fini(&vram->mm);
+ return ret;
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2011 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <subdev/i2c.h>
+#include <subdev/gpio.h>
+
+static u8 *
+dcb_gpio_table(struct drm_device *dev)
+{
+ u8 *dcb = dcb_table(dev);
+ if (dcb) {
+ if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
+ return ROMPTR(dev, dcb[0x0a]);
+ if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
+ return ROMPTR(dev, dcb[-15]);
+ }
+ return NULL;
+}
+
+static u8 *
+dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
+{
+ u8 *table = dcb_gpio_table(dev);
+ if (table) {
+ *version = table[0];
+ if (*version < 0x30 && ent < table[2])
+ return table + 3 + (ent * table[1]);
+ else if (ent < table[2])
+ return table + table[1] + (ent * table[3]);
+ }
+ return NULL;
+}
+
+int
+nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
+}
+
+int
+nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
+}
+
+int
+nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
+ struct gpio_func *gpio)
+{
+ u8 *table, *entry, version;
+ int i = -1;
+
+ if (line == 0xff && func == 0xff)
+ return -EINVAL;
+
+ while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
+ if (version < 0x40) {
+ u16 data = ROM16(entry[0]);
+ *gpio = (struct gpio_func) {
+ .line = (data & 0x001f) >> 0,
+ .func = (data & 0x07e0) >> 5,
+ .log[0] = (data & 0x1800) >> 11,
+ .log[1] = (data & 0x6000) >> 13,
+ };
+ } else
+ if (version < 0x41) {
+ *gpio = (struct gpio_func) {
+ .line = entry[0] & 0x1f,
+ .func = entry[1],
+ .log[0] = (entry[3] & 0x18) >> 3,
+ .log[1] = (entry[3] & 0x60) >> 5,
+ };
+ } else {
+ *gpio = (struct gpio_func) {
+ .line = entry[0] & 0x3f,
+ .func = entry[1],
+ .log[0] = (entry[4] & 0x30) >> 4,
+ .log[1] = (entry[4] & 0xc0) >> 6,
+ };
+ }
+
+ if ((line == 0xff || line == gpio->line) &&
+ (func == 0xff || func == gpio->func))
+ return 0;
+ }
+
+ /* DCB 2.2, fixed TVDAC GPIO data */
+ if ((table = dcb_table(dev)) && table[0] >= 0x22) {
+ if (func == DCB_GPIO_TVDAC0) {
+ *gpio = (struct gpio_func) {
+ .func = DCB_GPIO_TVDAC0,
+ .line = table[-4] >> 4,
+ .log[0] = !!(table[-5] & 2),
+ .log[1] = !(table[-5] & 2),
+ };
+ return 0;
+ }
+ }
+
+ /* Apple iMac G4 NV18 */
+ if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+ if (func == DCB_GPIO_TVDAC0) {
+ *gpio = (struct gpio_func) {
+ .func = DCB_GPIO_TVDAC0,
+ .line = 4,
+ .log[0] = 0,
+ .log[1] = 1,
+ };
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
+{
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ int dir = !!(gpio.log[state] & 0x02);
+ int out = !!(gpio.log[state] & 0x01);
+ ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
+{
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
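+		/* translate the raw pin level into a logical state by
+		 * comparing it with the "logic 1" output level from the
+		 * DCB gpio entry
+		 */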
+ ret = nouveau_gpio_sense(dev, idx, gpio.line);
+ if (ret >= 0)
+ ret = (ret == (gpio.log[1] & 1));
+ }
+
+ return ret;
+}
+
+int
+nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_func gpio;
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
+ if (ret == 0) {
+ if (idx == 0 && pgpio->irq_enable)
+ pgpio->irq_enable(dev, gpio.line, on);
+ else
+ ret = -ENODEV;
+ }
+
+ return ret;
+}
+
+struct gpio_isr {
+ struct drm_device *dev;
+ struct list_head head;
+ struct work_struct work;
+ int idx;
+ struct gpio_func func;
+ void (*handler)(void *, int);
+ void *data;
+ bool inhibit;
+};
+
+static void
+nouveau_gpio_isr_bh(struct work_struct *work)
+{
+ struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
+ struct drm_device *dev = isr->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ unsigned long flags;
+ int state;
+
+ state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
+ if (state >= 0)
+ isr->handler(isr->data, state);
+
+ spin_lock_irqsave(&pgpio->lock, flags);
+ isr->inhibit = false;
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+}
+
+void
+nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr;
+
+ if (idx != 0)
+ return;
+
+ spin_lock(&pgpio->lock);
+ list_for_each_entry(isr, &pgpio->isr, head) {
+ if (line_mask & (1 << isr->func.line)) {
+ if (isr->inhibit)
+ continue;
+ isr->inhibit = true;
+ schedule_work(&isr->work);
+ }
+ }
+ spin_unlock(&pgpio->lock);
+}
+
+int
+nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr;
+ unsigned long flags;
+ int ret;
+
+ isr = kzalloc(sizeof(*isr), GFP_KERNEL);
+ if (!isr)
+ return -ENOMEM;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
+ if (ret) {
+ kfree(isr);
+ return ret;
+ }
+
+ INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
+ isr->dev = dev;
+ isr->handler = handler;
+ isr->data = data;
+ isr->idx = idx;
+
+ spin_lock_irqsave(&pgpio->lock, flags);
+ list_add(&isr->head, &pgpio->isr);
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+ return 0;
+}
+
+void
+nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
+ void (*handler)(void *, int), void *data)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ struct gpio_isr *isr, *tmp;
+ struct gpio_func func;
+ unsigned long flags;
+ LIST_HEAD(tofree);
+ int ret;
+
+ ret = nouveau_gpio_find(dev, idx, tag, line, &func);
+ if (ret == 0) {
+ spin_lock_irqsave(&pgpio->lock, flags);
+ list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
+ if (memcmp(&isr->func, &func, sizeof(func)) ||
+ isr->idx != idx ||
+ isr->handler != handler || isr->data != data)
+ continue;
+ list_move(&isr->head, &tofree);
+ }
+ spin_unlock_irqrestore(&pgpio->lock, flags);
+
+ list_for_each_entry_safe(isr, tmp, &tofree, head) {
+ flush_work_sync(&isr->work);
+ kfree(isr);
+ }
+ }
+}
+
+int
+nouveau_gpio_create(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ INIT_LIST_HEAD(&pgpio->isr);
+ spin_lock_init(&pgpio->lock);
+
+ return nouveau_gpio_init(dev);
+}
+
+void
+nouveau_gpio_destroy(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ nouveau_gpio_fini(dev);
+ BUG_ON(!list_empty(&pgpio->isr));
+}
+
+int
+nouveau_gpio_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+ int ret = 0;
+
+ if (pgpio->init)
+ ret = pgpio->init(dev);
+
+ return ret;
+}
+
+void
+nouveau_gpio_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
+
+ if (pgpio->fini)
+ pgpio->fini(dev);
+}
+
+void
+nouveau_gpio_reset(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u8 *entry, version;
+ int ent = -1;
+
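+	/* walk the DCB gpio table, drive each usable line back to its
+	 * default state, and restore the per-line control registers to
+	 * their vbios defaults
+	 */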
+ while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
+ u8 func = 0xff, line, defs, unk0, unk1;
+ if (version >= 0x41) {
+ defs = !!(entry[0] & 0x80);
+ line = entry[0] & 0x3f;
+ func = entry[1];
+ unk0 = entry[2];
+ unk1 = entry[3] & 0x1f;
+ } else
+ if (version >= 0x40) {
+ line = entry[0] & 0x1f;
+ func = entry[1];
+ defs = !!(entry[3] & 0x01);
+ unk0 = !!(entry[3] & 0x02);
+ unk1 = !!(entry[3] & 0x04);
+ } else {
+ break;
+ }
+
+ if (func == 0xff)
+ continue;
+
+ nouveau_gpio_func_set(dev, func, defs);
+
+ if (dev_priv->card_type >= NV_D0) {
+ nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
+ if (unk1--)
+ nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
+ } else
+ if (dev_priv->card_type >= NV_50) {
+ static const u32 regs[] = { 0xe100, 0xe28c };
+ u32 val = (unk1 << 16) | unk0;
+ u32 reg = regs[line >> 4]; line &= 0x0f;
+
+ nv_mask(dev, reg, 0x00010001 << line, val << line);
+ }
+ }
+}
--- /dev/null
+/*
+ * Copyright (C) 2009 Francisco Jerez.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include <subdev/gpio.h>
+
+int
+nv10_gpio_sense(struct drm_device *dev, int line)
+{
+ if (line < 2) {
+ line = line * 16;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
+ return !!(line & 0x0100);
+ } else
+ if (line < 10) {
+ line = (line - 2) * 4;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
+ return !!(line & 0x04);
+ } else
+ if (line < 14) {
+ line = (line - 10) * 4;
+ line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
+ return !!(line & 0x04);
+ }
+
+ return -EINVAL;
+}
+
+int
+nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+{
+ u32 reg, mask, data;
+
+ if (line < 2) {
+ line = line * 16;
+ reg = NV_PCRTC_GPIO;
+ mask = 0x00000011;
+ data = (dir << 4) | out;
+ } else
+ if (line < 10) {
+ line = (line - 2) * 4;
+ reg = NV_PCRTC_GPIO_EXT;
+ mask = 0x00000003;
+ data = (dir << 1) | out;
+ } else
+ if (line < 14) {
+ line = (line - 10) * 4;
+ reg = NV_PCRTC_850;
+ mask = 0x00000003;
+ data = (dir << 1) | out;
+ } else {
+ return -EINVAL;
+ }
+
+ mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
+ NVWriteCRTC(dev, 0, reg, mask | (data << line));
+ return 0;
+}
+
+void
+nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
+{
+ u32 mask = 0x00010001 << line;
+
+ nv_wr32(dev, 0x001104, mask);
+ nv_mask(dev, 0x001144, mask, on ? mask : 0);
+}
+
+static void
+nv10_gpio_isr(struct drm_device *dev)
+{
+ u32 intr = nv_rd32(dev, 0x1104);
+ u32 hi = (intr & 0x0000ffff) >> 0;
+ u32 lo = (intr & 0xffff0000) >> 16;
+
+ nouveau_gpio_isr(dev, 0, hi | lo);
+
+ nv_wr32(dev, 0x001104, intr);
+}
+
+int
+nv10_gpio_init(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x001140, 0x00000000);
+ nv_wr32(dev, 0x001100, 0xffffffff);
+ nv_wr32(dev, 0x001144, 0x00000000);
+ nv_wr32(dev, 0x001104, 0xffffffff);
+ nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
+ return 0;
+}
+
+void
+nv10_gpio_fini(struct drm_device *dev)
+{
+ nv_wr32(dev, 0x001140, 0x00000000);
+ nv_wr32(dev, 0x001144, 0x00000000);
+ nouveau_irq_unregister(dev, 28);
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/dmi.h>
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_hw.h"
+#include <subdev/gpio.h>
+
+#include "nv50_display.h"
+
+static int
+nv50_gpio_location(int line, u32 *reg, u32 *shift)
+{
+ const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
+
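+	/* each of the 32 GPIO lines occupies a 4-bit field in one of the
+	 * four registers above: bit 0 output level, bit 1 direction,
+	 * bit 2 input level (see the drive/sense helpers below)
+	 */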
+ if (line >= 32)
+ return -EINVAL;
+
+ *reg = nv50_gpio_reg[line >> 3];
+ *shift = (line & 7) << 2;
+ return 0;
+}
+
+int
+nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+{
+ u32 reg, shift;
+
+ if (nv50_gpio_location(line, &reg, &shift))
+ return -EINVAL;
+
+ nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
+ return 0;
+}
+
+int
+nv50_gpio_sense(struct drm_device *dev, int line)
+{
+ u32 reg, shift;
+
+ if (nv50_gpio_location(line, &reg, &shift))
+ return -EINVAL;
+
+ return !!(nv_rd32(dev, reg) & (4 << shift));
+}
+
+void
+nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
+{
+ u32 reg = line < 16 ? 0xe050 : 0xe070;
+ u32 mask = 0x00010001 << (line & 0xf);
+
+ nv_wr32(dev, reg + 4, mask);
+ nv_mask(dev, reg + 0, mask, on ? mask : 0);
+}
+
+int
+nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
+{
+ u32 data = ((dir ^ 1) << 13) | (out << 12);
+ nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
+ nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
+ return 0;
+}
+
+int
+nvd0_gpio_sense(struct drm_device *dev, int line)
+{
+ return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
+}
+
+static void
+nv50_gpio_isr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 intr0, intr1 = 0;
+ u32 hi, lo;
+
+ intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
+ if (dev_priv->chipset >= 0x90)
+ intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
+
+ hi = (intr0 & 0x0000ffff) | (intr1 << 16);
+ lo = (intr0 >> 16) | (intr1 & 0xffff0000);
+ nouveau_gpio_isr(dev, 0, hi | lo);
+
+ nv_wr32(dev, 0xe054, intr0);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe074, intr1);
+}
+
+static struct dmi_system_id gpio_reset_ids[] = {
+ {
+ .ident = "Apple Macbook 10,1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
+ }
+ },
+ { }
+};
+
+int
+nv50_gpio_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* initialise gpios and routing to vbios defaults */
+ if (dmi_check_system(gpio_reset_ids))
+ nouveau_gpio_reset(dev);
+
+ /* disable, and ack any pending gpio interrupts */
+ nv_wr32(dev, 0xe050, 0x00000000);
+ nv_wr32(dev, 0xe054, 0xffffffff);
+ if (dev_priv->chipset >= 0x90) {
+ nv_wr32(dev, 0xe070, 0x00000000);
+ nv_wr32(dev, 0xe074, 0xffffffff);
+ }
+
+ nouveau_irq_register(dev, 21, nv50_gpio_isr);
+ return 0;
+}
+
+void
+nv50_gpio_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nv_wr32(dev, 0xe050, 0x00000000);
+ if (dev_priv->chipset >= 0x90)
+ nv_wr32(dev, 0xe070, 0x00000000);
+ nouveau_irq_unregister(dev, 21);
+}
--- /dev/null
+/*
+ * Copyright 2009 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include <linux/module.h>
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <subdev/i2c.h>
+#include "nouveau_hw.h"
+
+static void
+i2c_drive_scl(void *data, int state)
+{
+ struct nouveau_i2c_chan *port = data;
+ if (port->type == 0) {
+ u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+ if (state) val |= 0x20;
+ else val &= 0xdf;
+ NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+ } else
+ if (port->type == 4) {
+ nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
+ } else
+ if (port->type == 5) {
+ if (state) port->state |= 0x01;
+ else port->state &= 0xfe;
+ nv_wr32(port->dev, port->drive, 4 | port->state);
+ }
+}
+
+static void
+i2c_drive_sda(void *data, int state)
+{
+ struct nouveau_i2c_chan *port = data;
+ if (port->type == 0) {
+ u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
+ if (state) val |= 0x10;
+ else val &= 0xef;
+ NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
+ } else
+ if (port->type == 4) {
+ nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
+ } else
+ if (port->type == 5) {
+ if (state) port->state |= 0x02;
+ else port->state &= 0xfd;
+ nv_wr32(port->dev, port->drive, 4 | port->state);
+ }
+}
+
+static int
+i2c_sense_scl(void *data)
+{
+ struct nouveau_i2c_chan *port = data;
+ struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+ if (port->type == 0) {
+ return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
+ } else
+ if (port->type == 4) {
+ return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
+ } else
+ if (port->type == 5) {
+ if (dev_priv->card_type < NV_D0)
+ return !!(nv_rd32(port->dev, port->sense) & 0x01);
+ else
+ return !!(nv_rd32(port->dev, port->sense) & 0x10);
+ }
+ return 0;
+}
+
+static int
+i2c_sense_sda(void *data)
+{
+ struct nouveau_i2c_chan *port = data;
+ struct drm_nouveau_private *dev_priv = port->dev->dev_private;
+ if (port->type == 0) {
+ return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
+ } else
+ if (port->type == 4) {
+ return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
+ } else
+ if (port->type == 5) {
+ if (dev_priv->card_type < NV_D0)
+ return !!(nv_rd32(port->dev, port->sense) & 0x02);
+ else
+ return !!(nv_rd32(port->dev, port->sense) & 0x20);
+ }
+ return 0;
+}
+
+static const uint32_t nv50_i2c_port[] = {
+ 0x00e138, 0x00e150, 0x00e168, 0x00e180,
+ 0x00e254, 0x00e274, 0x00e764, 0x00e780,
+ 0x00e79c, 0x00e7b8
+};
+
+static u8 *
+i2c_table(struct drm_device *dev, u8 *version)
+{
+ u8 *dcb = dcb_table(dev), *i2c = NULL;
+ if (dcb) {
+ if (dcb[0] >= 0x15)
+ i2c = ROMPTR(dev, dcb[2]);
+ if (dcb[0] >= 0x30)
+ i2c = ROMPTR(dev, dcb[4]);
+ }
+
+ /* early revisions had no version number, use dcb version */
+ if (i2c) {
+ *version = dcb[0];
+ if (*version >= 0x30)
+ *version = i2c[0];
+ }
+
+ return i2c;
+}
+
+int
+nouveau_i2c_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct nouveau_i2c_chan *port;
+ u8 version = 0x00, entries, recordlen;
+ u8 *i2c, *entry, legacy[2][4] = {};
+ int ret, i;
+
+ INIT_LIST_HEAD(&dev_priv->i2c_ports);
+
+ i2c = i2c_table(dev, &version);
+ if (!i2c) {
+ u8 *bmp = &bios->data[bios->offset];
+ if (bios->type != NVBIOS_BMP)
+ return -ENODEV;
+
+ legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
+ legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
+ legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
+ legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
+
+ /* BMP (from v4.0) has i2c info in the structure, it's in a
+ * fixed location on earlier VBIOS
+ */
+ if (bmp[5] < 4)
+ i2c = &bios->data[0x48];
+ else
+ i2c = &bmp[0x36];
+
+ if (i2c[4]) legacy[0][0] = i2c[4];
+ if (i2c[5]) legacy[0][1] = i2c[5];
+ if (i2c[6]) legacy[1][0] = i2c[6];
+ if (i2c[7]) legacy[1][1] = i2c[7];
+ }
+
+ if (version >= 0x30) {
+ entry = i2c[1] + i2c;
+ entries = i2c[2];
+ recordlen = i2c[3];
+ } else
+ if (version) {
+ entry = i2c;
+ entries = 16;
+ recordlen = 4;
+ } else {
+ entry = legacy[0];
+ entries = 2;
+ recordlen = 4;
+ }
+
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (port == NULL) {
+ nouveau_i2c_fini(dev);
+ return -ENOMEM;
+ }
+
+ port->type = entry[3];
+ if (version < 0x30) {
+ port->type &= 0x07;
+ if (port->type == 0x07)
+ port->type = 0xff;
+ }
+
+ if (port->type == 0xff) {
+ kfree(port);
+ continue;
+ }
+
+ switch (port->type) {
+ case 0: /* NV04:NV50 */
+ port->drive = entry[0];
+ port->sense = entry[1];
+ break;
+ case 4: /* NV4E */
+ port->drive = 0x600800 + entry[1];
+ port->sense = port->drive;
+ break;
+ case 5: /* NV50- */
+ port->drive = entry[0] & 0x0f;
+ if (dev_priv->card_type < NV_D0) {
+ if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
+ break;
+ port->drive = nv50_i2c_port[port->drive];
+ port->sense = port->drive;
+ } else {
+ port->drive = 0x00d014 + (port->drive * 0x20);
+ port->sense = port->drive;
+ }
+ break;
+ case 6: /* NV50- DP AUX */
+ port->drive = entry[0] & 0x0f;
+ port->sense = port->drive;
+ port->adapter.algo = &nouveau_dp_i2c_algo;
+ break;
+ default:
+ break;
+ }
+
+ if (!port->adapter.algo && !port->drive) {
+ NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
+ i, port->type, port->drive, port->sense);
+ kfree(port);
+ continue;
+ }
+
+ snprintf(port->adapter.name, sizeof(port->adapter.name),
+ "nouveau-%s-%d", pci_name(dev->pdev), i);
+ port->adapter.owner = THIS_MODULE;
+ port->adapter.dev.parent = &dev->pdev->dev;
+ port->dev = dev;
+ port->index = i;
+ port->dcb = ROM32(entry[0]);
+ i2c_set_adapdata(&port->adapter, i2c);
+
+ if (port->adapter.algo != &nouveau_dp_i2c_algo) {
+ port->adapter.algo_data = &port->bit;
+ port->bit.udelay = 10;
+ port->bit.timeout = usecs_to_jiffies(2200);
+ port->bit.data = port;
+ port->bit.setsda = i2c_drive_sda;
+ port->bit.setscl = i2c_drive_scl;
+ port->bit.getsda = i2c_sense_sda;
+ port->bit.getscl = i2c_sense_scl;
+
+ i2c_drive_scl(port, 0);
+ i2c_drive_sda(port, 1);
+ i2c_drive_scl(port, 1);
+
+ ret = i2c_bit_add_bus(&port->adapter);
+ } else {
+ port->adapter.algo = &nouveau_dp_i2c_algo;
+ ret = i2c_add_adapter(&port->adapter);
+ }
+
+ if (ret) {
+ NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
+ kfree(port);
+ continue;
+ }
+
+ list_add_tail(&port->head, &dev_priv->i2c_ports);
+ }
+
+ return 0;
+}
+
+void
+nouveau_i2c_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *port, *tmp;
+
+ list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
+ i2c_del_adapter(&port->adapter);
+ kfree(port);
+ }
+}
+
+struct nouveau_i2c_chan *
+nouveau_i2c_find(struct drm_device *dev, u8 index)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_i2c_chan *port;
+
+ if (index == NV_I2C_DEFAULT(0) ||
+ index == NV_I2C_DEFAULT(1)) {
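+		/* i2c tables from DCB 3.0 onwards record which physical
+		 * buses the two "default" logical ports map to; older
+		 * tables are assumed to use bus 2
+		 */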
+ u8 version, *i2c = i2c_table(dev, &version);
+ if (i2c && version >= 0x30) {
+ if (index == NV_I2C_DEFAULT(0))
+ index = (i2c[4] & 0x0f);
+ else
+ index = (i2c[4] & 0xf0) >> 4;
+ } else {
+ index = 2;
+ }
+ }
+
+ list_for_each_entry(port, &dev_priv->i2c_ports, head) {
+ if (port->index == index)
+ break;
+ }
+
+ if (&port->head == &dev_priv->i2c_ports)
+ return NULL;
+
+ if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
+ u32 reg = 0x00e500, val;
+ if (port->type == 6) {
+ reg += port->drive * 0x50;
+ val = 0x2002;
+ } else {
+ reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
+ val = 0xe001;
+ }
+
+ /* nfi, but neither auxch or i2c work if it's 1 */
+ nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000);
+ /* nfi, but switches auxch vs normal i2c */
+ nv_mask(dev, reg + 0x00, 0x0000f003, val);
+ }
+
+ return port;
+}
+
+bool
+nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
+{
+ uint8_t buf[] = { 0 };
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr,
+ .flags = 0,
+ .len = 1,
+ .buf = buf,
+ },
+ {
+ .addr = addr,
+ .flags = I2C_M_RD,
+ .len = 1,
+ .buf = buf,
+ }
+ };
+
+ return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
+}
+
+int
+nouveau_i2c_identify(struct drm_device *dev, const char *what,
+ struct i2c_board_info *info,
+ bool (*match)(struct nouveau_i2c_chan *,
+ struct i2c_board_info *),
+ int index)
+{
+ struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
+ int i;
+
+ if (!i2c) {
+ NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
+ return -ENODEV;
+ }
+
+ NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
+ for (i = 0; info[i].addr; i++) {
+ if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
+ (!match || match(i2c, &info[i]))) {
+ NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
+ return i;
+ }
+ }
+
+ NV_DEBUG(dev, "No devices found.\n");
+ return -ENODEV;
+}
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
+
+/* returns the size of fifo context */
+static int
+nouveau_fifo_ctx_size(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->chipset >= 0x40)
+ return 128 * 32;
+ else
+ if (dev_priv->chipset >= 0x17)
+ return 64 * 32;
+ else
+ if (dev_priv->chipset >= 0x10)
+ return 32 * 32;
+
+ return 32 * 16;
+}
+
+int nv04_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramht = NULL;
+ u32 offset, length;
+ int ret;
+
+ /* RAMIN always available */
+ dev_priv->ramin_available = true;
+
+ /* Reserve space at end of VRAM for PRAMIN */
+ if (dev_priv->card_type >= NV_40) {
+ u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
+ u32 rsvd;
+
+ /* estimate grctx size, the magics come from nv40_grctx.c */
+ if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
+ else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
+ else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
+ else rsvd = 0x4a40 * vs;
+ rsvd += 16 * 1024;
+ rsvd *= 32; /* per-channel */
+
+ rsvd += 512 * 1024; /* pci(e)gart table */
+ rsvd += 512 * 1024; /* object storage */
+
+ dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
+ } else {
+ dev_priv->ramin_rsvd_vram = 512 * 1024;
+ }
+
+ /* Setup shared RAMHT */
+ ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &ramht);
+ if (ret)
+ return ret;
+
+ ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
+ nouveau_gpuobj_ref(NULL, &ramht);
+ if (ret)
+ return ret;
+
+ /* And RAMRO */
+ ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
+ NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
+ if (ret)
+ return ret;
+
+ /* And RAMFC */
+ length = nouveau_fifo_ctx_size(dev);
+ switch (dev_priv->card_type) {
+ case NV_40:
+ offset = 0x20000;
+ break;
+ default:
+ offset = 0x11400;
+ break;
+ }
+
+ ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
+ NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
+ if (ret)
+ return ret;
+
+ /* Only allow space after RAMFC to be used for object allocation */
+ offset += length;
+
+ /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
+ * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
+ * ("new style" control) the upper 16-bits of 0x2220 points at this
+ * other mysterious table that's clobbering important things.
+ *
+ * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
+ * smashed to pieces on us, so reserve 0x30000-0x40000 too..
+ */
+ if (dev_priv->card_type >= NV_40) {
+ if (offset < 0x40000)
+ offset = 0x40000;
+ }
+
+ ret = drm_mm_init(&dev_priv->ramin_heap, offset,
+ dev_priv->ramin_rsvd_vram - offset);
+ if (ret) {
+ NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nv04_instmem_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
+ nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
+ nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
+
+ if (drm_mm_initialized(&dev_priv->ramin_heap))
+ drm_mm_takedown(&dev_priv->ramin_heap);
+}
+
+int
+nv04_instmem_suspend(struct drm_device *dev)
+{
+ return 0;
+}
+
+void
+nv04_instmem_resume(struct drm_device *dev)
+{
+}
+
+int
+nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
+ u32 size, u32 align)
+{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct drm_mm_node *ramin = NULL;
+
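+ /* preallocate node memory outside the lock, then retry the search
+ * if the atomic block allocation fails (e.g. we lost a race)
+ */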
+ do {
+ if (drm_mm_pre_get(&dev_priv->ramin_heap))
+ return -ENOMEM;
+
+ spin_lock(&dev_priv->ramin_lock);
+ ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
+ if (ramin == NULL) {
+ spin_unlock(&dev_priv->ramin_lock);
+ return -ENOMEM;
+ }
+
+ ramin = drm_mm_get_block_atomic(ramin, size, align);
+ spin_unlock(&dev_priv->ramin_lock);
+ } while (ramin == NULL);
+
+ gpuobj->node = ramin;
+ gpuobj->vinst = ramin->start;
+ return 0;
+}
+
+void
+nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+
+ spin_lock(&dev_priv->ramin_lock);
+ drm_mm_put_block(gpuobj->node);
+ gpuobj->node = NULL;
+ spin_unlock(&dev_priv->ramin_lock);
+}
+
+int
+nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
+{
+ gpuobj->pinst = gpuobj->vinst;
+ return 0;
+}
+
+void
+nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
+{
+}
+
+void
+nv04_instmem_flush(struct drm_device *dev)
+{
+}
--- /dev/null
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "nouveau_drv.h"
+#include <subdev/vm.h>
+
+#define BAR1_VM_BASE 0x0020000000ULL
+#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
+#define BAR3_VM_BASE 0x0000000000ULL
+#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
+
+struct nv50_instmem_priv {
+ uint32_t save1700[5]; /* 0x1700->0x1710 */
+
+ struct nouveau_gpuobj *bar1_dmaobj;
+ struct nouveau_gpuobj *bar3_dmaobj;
+};
+
+static void
+nv50_channel_del(struct nouveau_channel **pchan)
+{
+ struct nouveau_channel *chan;
+
+ chan = *pchan;
+ *pchan = NULL;
+ if (!chan)
+ return;
+
+ nouveau_gpuobj_ref(NULL, &chan->ramfc);
+ nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
+ nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+ if (drm_mm_initialized(&chan->ramin_heap))
+ drm_mm_takedown(&chan->ramin_heap);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
+ kfree(chan);
+}
+
+static int
+nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
+ struct nouveau_channel **pchan)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
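+ /* offsets of the page directory and RAMFC within the channel's
+ * instance memory differ between the original NV50 and later chips
+ */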
+ u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
+ u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
+ struct nouveau_channel *chan;
+ int ret, i;
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
+
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
+ chan->ramin->pinst + pgd,
+ chan->ramin->vinst + pgd,
+ 0x4000, NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->vm_pd);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
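+ /* fill the page directory with "invalid" entries; 0xdeadcafe in the
+ * upper word marks a PDE with no page table (see nv50_vm_map_pgt)
+ */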
+ for (i = 0; i < 0x4000; i += 8) {
+ nv_wo32(chan->vm_pd, i + 0, 0x00000000);
+ nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
+ }
+
+ ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
+ chan->ramin->pinst + fc,
+ chan->ramin->vinst + fc, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
+ if (ret) {
+ nv50_channel_del(&chan);
+ return ret;
+ }
+
+ *pchan = chan;
+ return 0;
+}
+
+int
+nv50_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv;
+ struct nouveau_channel *chan;
+ struct nouveau_vm *vm;
+ int ret, i;
+ u32 tmp;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->engine.instmem.priv = priv;
+
+ /* Save state, will restore at takedown. */
+ for (i = 0x1700; i <= 0x1710; i += 4)
+ priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
+
+ /* Global PRAMIN heap */
+ ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
+ if (ret) {
+ NV_ERROR(dev, "Failed to init RAMIN heap\n");
+ goto error;
+ }
+
+ /* BAR3 */
+ ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
+ &dev_priv->bar3_vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
+ 0x1000, NVOBJ_FLAG_DONT_MAP |
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->bar3_vm->pgt[0].obj[0]);
+ if (ret)
+ goto error;
+ dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
+
+ nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
+
+ ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
+ if (ret)
+ goto error;
+ dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
+
+ ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
+ NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+ NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+ &priv->bar3_dmaobj);
+ if (ret)
+ goto error;
+
+ nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
+ nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
+
+ dev_priv->engine.instmem.flush(dev);
+ dev_priv->ramin_available = true;
+
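+ /* sanity check: flip the first word of the channel's instance memory
+ * and read it back to confirm the new PRAMIN mapping actually works
+ */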
+ tmp = nv_ro32(chan->ramin, 0);
+ nv_wo32(chan->ramin, 0, ~tmp);
+ if (nv_ro32(chan->ramin, 0) != ~tmp) {
+ NV_ERROR(dev, "PRAMIN readback failed\n");
+ ret = -EIO;
+ goto error;
+ }
+ nv_wo32(chan->ramin, 0, tmp);
+
+ /* BAR1 */
+ ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
+ NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
+ NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
+ &priv->bar1_dmaobj);
+ if (ret)
+ goto error;
+
+ nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x1900 + (i*4), 0);
+
+ /* Create shared channel VM, space is reserved at the beginning
+ * to catch "NULL pointer" references
+ */
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
+ &dev_priv->chan_vm);
+ if (ret)
+ goto error;
+
+ return 0;
+
+error:
+ nv50_instmem_takedown(dev);
+ return ret;
+}
+
+void
+nv50_instmem_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_channel *chan = dev_priv->channels.ptr[0];
+ int i;
+
+ NV_DEBUG(dev, "\n");
+
+ if (!priv)
+ return;
+
+ dev_priv->ramin_available = false;
+
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
+
+ for (i = 0x1700; i <= 0x1710; i += 4)
+ nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
+
+ nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
+ nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
+
+ nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
+ dev_priv->channels.ptr[127] = 0;
+ nv50_channel_del(&dev_priv->channels.ptr[0]);
+
+ nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+ nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+ if (drm_mm_initialized(&dev_priv->ramin_heap))
+ drm_mm_takedown(&dev_priv->ramin_heap);
+
+ dev_priv->engine.instmem.priv = NULL;
+ kfree(priv);
+}
+
+int
+nv50_instmem_suspend(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->ramin_available = false;
+ return 0;
+}
+
+void
+nv50_instmem_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_channel *chan = dev_priv->channels.ptr[0];
+ int i;
+
+ /* Poke the relevant regs, and pray it works :) */
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
+ nv_wr32(dev, NV50_PUNK_UNK1710, 0);
+ nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
+ NV50_PUNK_BAR_CFG_BASE_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
+ NV50_PUNK_BAR1_CTXDMA_VALID);
+ nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
+ NV50_PUNK_BAR3_CTXDMA_VALID);
+
+ for (i = 0; i < 8; i++)
+ nv_wr32(dev, 0x1900 + (i*4), 0);
+
+ dev_priv->ramin_available = true;
+}
+
+struct nv50_gpuobj_node {
+ struct nouveau_mem *vram;
+ struct nouveau_vma chan_vma;
+ u32 align;
+};
+
+int
+nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
+ u32 size, u32 align)
+{
+ struct drm_device *dev = gpuobj->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nv50_gpuobj_node *node = NULL;
+ int ret;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ return -ENOMEM;
+ node->align = align;
+
+ size = (size + 4095) & ~4095;
+ align = max(align, (u32)4096);
+
+ ret = vram->get(dev, size, align, 0, 0, &node->vram);
+ if (ret) {
+ kfree(node);
+ return ret;
+ }
+
+ gpuobj->vinst = node->vram->offset;
+
+ if (gpuobj->flags & NVOBJ_FLAG_VM) {
+ u32 flags = NV_MEM_ACCESS_RW;
+ if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
+ flags |= NV_MEM_ACCESS_SYS;
+
+ ret = nouveau_vm_get(chan->vm, size, 12, flags,
+ &node->chan_vma);
+ if (ret) {
+ vram->put(dev, &node->vram);
+ kfree(node);
+ return ret;
+ }
+
+ nouveau_vm_map(&node->chan_vma, node->vram);
+ gpuobj->linst = node->chan_vma.offset;
+ }
+
+ gpuobj->size = size;
+ gpuobj->node = node;
+ return 0;
+}
+
+void
+nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_device *dev = gpuobj->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
+ struct nv50_gpuobj_node *node;
+
+ node = gpuobj->node;
+ gpuobj->node = NULL;
+
+ if (node->chan_vma.node) {
+ nouveau_vm_unmap(&node->chan_vma);
+ nouveau_vm_put(&node->chan_vma);
+ }
+ vram->put(dev, &node->vram);
+ kfree(node);
+}
+
+int
+nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
+ struct nv50_gpuobj_node *node = gpuobj->node;
+ int ret;
+
+ ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
+ NV_MEM_ACCESS_RW, &node->vram->bar_vma);
+ if (ret)
+ return ret;
+
+ nouveau_vm_map(&node->vram->bar_vma, node->vram);
+ gpuobj->pinst = node->vram->bar_vma.offset;
+ return 0;
+}
+
+void
+nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
+{
+ struct nv50_gpuobj_node *node = gpuobj->node;
+
+ if (node->vram->bar_vma.node) {
+ nouveau_vm_unmap(&node->vram->bar_vma);
+ nouveau_vm_put(&node->vram->bar_vma);
+ }
+}
+
+void
+nv50_instmem_flush(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
+ nv_wr32(dev, 0x00330c, 0x00000001);
+ if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
+}
+
+void
+nv84_instmem_flush(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
+ nv_wr32(dev, 0x070000, 0x00000001);
+ if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
+ NV_ERROR(dev, "PRAMIN flush timeout\n");
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include <subdev/vm.h>
+
+struct nvc0_instmem_priv {
+ struct nouveau_gpuobj *bar1_pgd;
+ struct nouveau_channel *bar1;
+ struct nouveau_gpuobj *bar3_pgd;
+ struct nouveau_channel *bar3;
+};
+
+int
+nvc0_instmem_suspend(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ dev_priv->ramin_available = false;
+ return 0;
+}
+
+void
+nvc0_instmem_resume(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
+
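+ /* point BAR1 and BAR3 at their respective channels' instance blocks */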
+ nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
+ nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
+ dev_priv->ramin_available = true;
+}
+
+static void
+nvc0_channel_del(struct nouveau_channel **pchan)
+{
+ struct nouveau_channel *chan;
+
+ chan = *pchan;
+ *pchan = NULL;
+ if (!chan)
+ return;
+
+ nouveau_vm_ref(NULL, &chan->vm, NULL);
+ if (drm_mm_initialized(&chan->ramin_heap))
+ drm_mm_takedown(&chan->ramin_heap);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
+ kfree(chan);
+}
+
+static int
+nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
+ struct nouveau_channel **pchan,
+ struct nouveau_gpuobj *pgd, u64 vm_size)
+{
+ struct nouveau_channel *chan;
+ int ret;
+
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
+
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
+ }
+
+ ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
+ }
+
+ ret = nouveau_vm_ref(vm, &chan->vm, NULL);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
+ }
+
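+ /* write the page directory address and VM limit into the channel's
+ * instance block
+ */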
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
+ nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
+
+ *pchan = chan;
+ return 0;
+}
+
+int
+nvc0_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct pci_dev *pdev = dev->pdev;
+ struct nvc0_instmem_priv *priv;
+ struct nouveau_vm *vm = NULL;
+ int ret;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+ pinstmem->priv = priv;
+
+ /* BAR3 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
+ &dev_priv->bar3_vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL,
+ (pci_resource_len(pdev, 3) >> 12) * 8, 0,
+ NVOBJ_FLAG_DONT_MAP |
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->bar3_vm->pgt[0].obj[0]);
+ if (ret)
+ goto error;
+ dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
+
+ nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
+ priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
+ if (ret)
+ goto error;
+
+ /* BAR1 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
+ priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
+ if (ret)
+ goto error;
+
+ /* channel vm */
+ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
+ &dev_priv->chan_vm);
+ if (ret)
+ goto error;
+
+ nvc0_instmem_resume(dev);
+ return 0;
+error:
+ nvc0_instmem_takedown(dev);
+ return ret;
+}
+
+void
+nvc0_instmem_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_vm *vm = NULL;
+
+ nvc0_instmem_suspend(dev);
+
+ nv_wr32(dev, 0x1704, 0x00000000);
+ nv_wr32(dev, 0x1714, 0x00000000);
+
+ nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
+
+ nvc0_channel_del(&priv->bar1);
+ nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
+
+ nvc0_channel_del(&priv->bar3);
+ nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
+ nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+ nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
+
+ dev_priv->engine.instmem.priv = NULL;
+ kfree(priv);
+}
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv04_mc_init(struct drm_device *dev)
+{
+ /* Power up everything, resetting each individual unit will
+ * be done later if needed.
+ */
+
+ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+
+ /* Disable PROM access. */
+ nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
+
+ return 0;
+}
+
+void
+nv04_mc_takedown(struct drm_device *dev)
+{
+}
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv40_mc_init(struct drm_device *dev)
+{
+ /* Power up everything, resetting each individual unit will
+ * be done later if needed.
+ */
+ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+
+ if (nv44_graph_class(dev)) {
+ u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
+ nv_wr32(dev, NV40_PMC_1700, tmp);
+ nv_wr32(dev, NV40_PMC_1704, 0);
+ nv_wr32(dev, NV40_PMC_1708, 0);
+ nv_wr32(dev, NV40_PMC_170C, tmp);
+ }
+
+ return 0;
+}
+
+void
+nv40_mc_takedown(struct drm_device *dev)
+{
+}
--- /dev/null
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+int
+nv50_mc_init(struct drm_device *dev)
+{
+ nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
+ return 0;
+}
+
+void nv50_mc_takedown(struct drm_device *dev)
+{
+}
--- /dev/null
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+#include "nouveau_hw.h"
+
+int
+nv04_timer_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ u32 m, n, d;
+
+ nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
+ nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
+
+ /* aim for 31.25MHz, which gives us nanosecond timestamps */
+ d = 1000000 / 32;
+
+ /* determine base clock for timer source */
+ if (dev_priv->chipset < 0x40) {
+ n = nouveau_hw_get_clock(dev, PLL_CORE);
+ } else
+ if (dev_priv->chipset == 0x40) {
+ /*XXX: figure this out */
+ n = 0;
+ } else {
+ n = dev_priv->crystal;
+ m = 1;
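+ /* scale the crystal frequency by an integer multiplier until it is
+ * at least twice the target rate, then program multiplier - 1
+ */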
+ while (n < (d * 2)) {
+ n += (n / m);
+ m++;
+ }
+
+ nv_wr32(dev, 0x009220, m - 1);
+ }
+
+ if (!n) {
+ NV_WARN(dev, "PTIMER: unknown input clock freq\n");
+ if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
+ !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
+ nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1);
+ nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1);
+ }
+ return 0;
+ }
+
+ /* reduce ratio to acceptable values */
+ while (((n % 5) == 0) && ((d % 5) == 0)) {
+ n /= 5;
+ d /= 5;
+ }
+
+ while (((n % 2) == 0) && ((d % 2) == 0)) {
+ n /= 2;
+ d /= 2;
+ }
+
+ while (n > 0xffff || d > 0xffff) {
+ n >>= 1;
+ d >>= 1;
+ }
+
+ nv_wr32(dev, NV04_PTIMER_NUMERATOR, n);
+ nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d);
+ return 0;
+}
+
+u64
+nv04_timer_read(struct drm_device *dev)
+{
+ u32 hi, lo;
+
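+ /* re-read until the high word is stable, so the 64-bit timestamp is
+ * consistent across low-word rollover
+ */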
+ do {
+ hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
+ lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
+ } while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));
+
+ return ((u64)hi << 32 | lo);
+}
+
+void
+nv04_timer_takedown(struct drm_device *dev)
+{
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include <core/mm.h>
+#include <subdev/vm.h>
+
+void
+nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
+{
+ struct nouveau_vm *vm = vma->vm;
+ struct nouveau_mm_node *r;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ delta = 0;
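+ /* walk each physically contiguous region of the allocation, writing
+ * PTEs and advancing to the next page table whenever the current one
+ * fills up
+ */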
+ list_for_each_entry(r, &node->regions, rl_entry) {
+ u64 phys = (u64)r->offset << 12;
+ u32 num = r->length >> bits;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->map(vma, pgt, node, pte, len, phys, delta);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ phys += len << (bits + 12);
+ pde++;
+ pte = 0;
+ }
+
+ delta += (u64)len << vma->node->type;
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+{
+ nouveau_vm_map_at(vma, 0, node);
+}
+
+void
+nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
+ struct nouveau_mem *mem)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ unsigned m, sglen;
+ u32 end, len;
+ int i;
+ struct scatterlist *sg;
+
+ for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+ sglen = sg_dma_len(sg) >> PAGE_SHIFT;
+
+ end = pte + sglen;
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+
+ if (num == 0)
+ goto finish;
+ }
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ if (m < sglen) {
+ for (; m < sglen; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+
+ vm->map_sg(vma, pgt, mem, pte, 1, &addr);
+ num--;
+ pte++;
+ if (num == 0)
+ goto finish;
+ }
+ }
+
+ }
+finish:
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
+ struct nouveau_mem *mem)
+{
+ struct nouveau_vm *vm = vma->vm;
+ dma_addr_t *list = mem->pages;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->map_sg(vma, pgt, mem, pte, len, list);
+
+ num -= len;
+ pte += len;
+ list += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
+{
+ struct nouveau_vm *vm = vma->vm;
+ int big = vma->node->type != vm->spg_shift;
+ u32 offset = vma->node->offset + (delta >> 12);
+ u32 bits = vma->node->type - 12;
+ u32 num = length >> vma->node->type;
+ u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
+ u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
+ u32 max = 1 << (vm->pgt_bits - bits);
+ u32 end, len;
+
+ while (num) {
+ struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
+
+ end = (pte + num);
+ if (unlikely(end >= max))
+ end = max;
+ len = end - pte;
+
+ vm->unmap(pgt, pte, len);
+
+ num -= len;
+ pte += len;
+ if (unlikely(end >= max)) {
+ pde++;
+ pte = 0;
+ }
+ }
+
+ vm->flush(vm);
+}
+
+void
+nouveau_vm_unmap(struct nouveau_vma *vma)
+{
+ nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
+}
+
+static void
+nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
+{
+ struct nouveau_vm_pgd *vpgd;
+ struct nouveau_vm_pgt *vpgt;
+ struct nouveau_gpuobj *pgt;
+ u32 pde;
+
+ for (pde = fpde; pde <= lpde; pde++) {
+ vpgt = &vm->pgt[pde - vm->fpde];
+ if (--vpgt->refcount[big])
+ continue;
+
+ pgt = vpgt->obj[big];
+ vpgt->obj[big] = NULL;
+
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ mutex_unlock(&vm->mm.mutex);
+ nouveau_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&vm->mm.mutex);
+ }
+}
+
+static int
+nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
+{
+ struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ struct nouveau_vm_pgd *vpgd;
+ struct nouveau_gpuobj *pgt;
+ int big = (type != vm->spg_shift);
+ u32 pgt_size;
+ int ret;
+
+ pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
+ pgt_size *= 8;
+
+ mutex_unlock(&vm->mm.mutex);
+ ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+ mutex_lock(&vm->mm.mutex);
+ if (unlikely(ret))
+ return ret;
+
+ /* someone beat us to filling the PDE while we didn't have the lock */
+ if (unlikely(vpgt->refcount[big]++)) {
+ mutex_unlock(&vm->mm.mutex);
+ nouveau_gpuobj_ref(NULL, &pgt);
+ mutex_lock(&vm->mm.mutex);
+ return 0;
+ }
+
+ vpgt->obj[big] = pgt;
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ vm->map_pgt(vpgd->obj, pde, vpgt->obj);
+ }
+
+ return 0;
+}
+
+int
+nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
+ u32 access, struct nouveau_vma *vma)
+{
+ u32 align = (1 << page_shift) >> 12;
+ u32 msize = size >> 12;
+ u32 fpde, lpde, pde;
+ int ret;
+
+ mutex_lock(&vm->mm.mutex);
+ ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&vm->mm.mutex);
+ return ret;
+ }
+
+ fpde = (vma->node->offset >> vm->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+ for (pde = fpde; pde <= lpde; pde++) {
+ struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
+ int big = (vma->node->type != vm->spg_shift);
+
+ if (likely(vpgt->refcount[big])) {
+ vpgt->refcount[big]++;
+ continue;
+ }
+
+ ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
+ if (ret) {
+ if (pde != fpde)
+ nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
+ nouveau_mm_put(&vm->mm, vma->node);
+ mutex_unlock(&vm->mm.mutex);
+ vma->node = NULL;
+ return ret;
+ }
+ }
+ mutex_unlock(&vm->mm.mutex);
+
+ vma->vm = vm;
+ vma->offset = (u64)vma->node->offset << 12;
+ vma->access = access;
+ return 0;
+}
+
+void
+nouveau_vm_put(struct nouveau_vma *vma)
+{
+ struct nouveau_vm *vm = vma->vm;
+ u32 fpde, lpde;
+
+ if (unlikely(vma->node == NULL))
+ return;
+ fpde = (vma->node->offset >> vm->pgt_bits);
+ lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
+
+ mutex_lock(&vm->mm.mutex);
+ nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
+ nouveau_mm_put(&vm->mm, vma->node);
+ vma->node = NULL;
+ mutex_unlock(&vm->mm.mutex);
+}
+
+int
+nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
+ struct nouveau_vm **pvm)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_vm *vm;
+ u64 mm_length = (offset + length) - mm_offset;
+ u32 block, pgt_bits;
+ int ret;
+
+ vm = kzalloc(sizeof(*vm), GFP_KERNEL);
+ if (!vm)
+ return -ENOMEM;
+
+ if (dev_priv->card_type == NV_50) {
+ vm->map_pgt = nv50_vm_map_pgt;
+ vm->map = nv50_vm_map;
+ vm->map_sg = nv50_vm_map_sg;
+ vm->unmap = nv50_vm_unmap;
+ vm->flush = nv50_vm_flush;
+ vm->spg_shift = 12;
+ vm->lpg_shift = 16;
+
+ pgt_bits = 29;
+ block = (1 << pgt_bits);
+ if (length < block)
+ block = length;
+
+ } else
+ if (dev_priv->card_type >= NV_C0) {
+ vm->map_pgt = nvc0_vm_map_pgt;
+ vm->map = nvc0_vm_map;
+ vm->map_sg = nvc0_vm_map_sg;
+ vm->unmap = nvc0_vm_unmap;
+ vm->flush = nvc0_vm_flush;
+ vm->spg_shift = 12;
+ vm->lpg_shift = 17;
+ pgt_bits = 27;
+ block = 4096;
+ } else {
+ kfree(vm);
+ return -ENOSYS;
+ }
+
+ vm->fpde = offset >> pgt_bits;
+ vm->lpde = (offset + length - 1) >> pgt_bits;
+ vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
+ if (!vm->pgt) {
+ kfree(vm);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&vm->pgd_list);
+ vm->dev = dev;
+ vm->refcount = 1;
+ vm->pgt_bits = pgt_bits - 12;
+
+ ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
+ block >> 12);
+ if (ret) {
+ kfree(vm);
+ return ret;
+ }
+
+ *pvm = vm;
+ return 0;
+}
+
+static int
+nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm_pgd *vpgd;
+ int i;
+
+ if (!pgd)
+ return 0;
+
+ vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
+ if (!vpgd)
+ return -ENOMEM;
+
+ nouveau_gpuobj_ref(pgd, &vpgd->obj);
+
+ mutex_lock(&vm->mm.mutex);
+ for (i = vm->fpde; i <= vm->lpde; i++)
+ vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+ list_add(&vpgd->head, &vm->pgd_list);
+ mutex_unlock(&vm->mm.mutex);
+ return 0;
+}
+
+static void
+nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
+{
+ struct nouveau_vm_pgd *vpgd, *tmp;
+ struct nouveau_gpuobj *pgd = NULL;
+
+ if (!mpgd)
+ return;
+
+ mutex_lock(&vm->mm.mutex);
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ if (vpgd->obj == mpgd) {
+ pgd = vpgd->obj;
+ list_del(&vpgd->head);
+ kfree(vpgd);
+ break;
+ }
+ }
+ mutex_unlock(&vm->mm.mutex);
+
+ nouveau_gpuobj_ref(NULL, &pgd);
+}
+
+static void
+nouveau_vm_del(struct nouveau_vm *vm)
+{
+ struct nouveau_vm_pgd *vpgd, *tmp;
+
+ list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
+ nouveau_vm_unlink(vm, vpgd->obj);
+ }
+
+ nouveau_mm_fini(&vm->mm);
+ kfree(vm->pgt);
+ kfree(vm);
+}
+
+int
+nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
+ struct nouveau_gpuobj *pgd)
+{
+ struct nouveau_vm *vm;
+ int ret;
+
+ vm = ref;
+ if (vm) {
+ ret = nouveau_vm_link(vm, pgd);
+ if (ret)
+ return ret;
+
+ vm->refcount++;
+ }
+
+ vm = *ptr;
+ *ptr = ref;
+
+ if (vm) {
+ nouveau_vm_unlink(vm, pgd);
+
+ if (--vm->refcount == 0)
+ nouveau_vm_del(vm);
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include <subdev/vm.h>
+
+void
+nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
+ struct nouveau_gpuobj *pgt[2])
+{
+ u64 phys = 0xdeadcafe00000000ULL;
+ u32 coverage = 0;
+
+ if (pgt[0]) {
+ phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
+ coverage = (pgt[0]->size >> 3) << 12;
+ } else
+ if (pgt[1]) {
+ phys = 0x00000001 | pgt[1]->vinst; /* present */
+ coverage = (pgt[1]->size >> 3) << 16;
+ }
+
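+ /* bits 5:6 appear to encode the page table size: tables covering
+ * less address space get a larger value (assumption based on the
+ * coverage thresholds below)
+ */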
+ if (phys & 1) {
+ if (coverage <= 32 * 1024 * 1024)
+ phys |= 0x60;
+ else if (coverage <= 64 * 1024 * 1024)
+ phys |= 0x40;
+ else if (coverage <= 128 * 1024 * 1024)
+ phys |= 0x20;
+ }
+
+ nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
+ nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
+}
+
+static inline u64
+vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+ phys |= 1; /* present */
+ phys |= (u64)memtype << 40;
+ phys |= target << 4;
+ if (vma->access & NV_MEM_ACCESS_SYS)
+ phys |= (1 << 6);
+ if (!(vma->access & NV_MEM_ACCESS_WO))
+ phys |= (1 << 3);
+ return phys;
+}
+
+void
+nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
+{
+ struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
+ u32 comp = (mem->memtype & 0x180) >> 7;
+ u32 block, target;
+ int i;
+
+ /* IGPs don't have real VRAM, re-target to stolen system memory */
+ target = 0;
+ if (dev_priv->vram_sys_base) {
+ phys += dev_priv->vram_sys_base;
+ target = 3;
+ }
+
+ phys = vm_addr(vma, phys, mem->memtype, target);
+ pte <<= 3;
+ cnt <<= 3;
+
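+ /* write PTEs in the largest naturally aligned power-of-two runs
+ * possible; the run size (order) is encoded into the low word of
+ * each PTE
+ */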
+ while (cnt) {
+ u32 offset_h = upper_32_bits(phys);
+ u32 offset_l = lower_32_bits(phys);
+
+ for (i = 7; i >= 0; i--) {
+ block = 1 << (i + 3);
+ if (cnt >= block && !(pte & (block - 1)))
+ break;
+ }
+ offset_l |= (i << 7);
+
+ phys += block << (vma->node->type - 3);
+ cnt -= block;
+ if (comp) {
+ u32 tag = mem->tag->start + ((delta >> 16) * comp);
+ offset_h |= (tag << 17);
+ delta += block << (vma->node->type - 3);
+ }
+
+ while (block) {
+ nv_wo32(pgt, pte + 0, offset_l);
+ nv_wo32(pgt, pte + 4, offset_h);
+ pte += 8;
+ block -= 8;
+ }
+ }
+}
+
+void
+nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
+ u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
+ pte <<= 3;
+ while (cnt--) {
+ u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, 0x00000000);
+ nv_wo32(pgt, pte + 4, 0x00000000);
+ pte += 8;
+ }
+}
+
+void
+nv50_vm_flush(struct nouveau_vm *vm)
+{
+ struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ int i;
+
+ pinstmem->flush(vm->dev);
+
+ /* BAR */
+ if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
+ nv50_vm_flush_engine(vm->dev, 6);
+ return;
+ }
+
+ for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
+ if (atomic_read(&vm->engref[i]))
+ dev_priv->eng[i]->tlb_flush(vm->dev, i);
+ }
+}
+
+void
+nv50_vm_flush_engine(struct drm_device *dev, int engine)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
+ nv_wr32(dev, 0x100c80, (engine << 16) | 1);
+ if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
+ NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
+}
--- /dev/null
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include <subdev/vm.h>
+
+void
+nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
+ struct nouveau_gpuobj *pgt[2])
+{
+ u32 pde[2] = { 0, 0 };
+
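+ /* the first PDE word holds the large-page table address, the second
+ * the small-page table address (pgt[0] = small, pgt[1] = large)
+ */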
+ if (pgt[0])
+ pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
+ if (pgt[1])
+ pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
+
+ nv_wo32(pgd, (index * 8) + 0, pde[0]);
+ nv_wo32(pgd, (index * 8) + 4, pde[1]);
+}
+
+static inline u64
+nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
+{
+ phys >>= 8;
+
+ phys |= 0x00000001; /* present */
+ if (vma->access & NV_MEM_ACCESS_SYS)
+ phys |= 0x00000002;
+
+ phys |= ((u64)target << 32);
+ phys |= ((u64)memtype << 36);
+
+ return phys;
+}
+
+void
+nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
+{
+ u32 next = 1 << (vma->node->type - 8);
+
+ phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ phys += next;
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
+ struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
+{
+ u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
+
+ pte <<= 3;
+ while (cnt--) {
+ u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
+ nv_wo32(pgt, pte + 0, lower_32_bits(phys));
+ nv_wo32(pgt, pte + 4, upper_32_bits(phys));
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
+{
+ pte <<= 3;
+ while (cnt--) {
+ nv_wo32(pgt, pte + 0, 0x00000000);
+ nv_wo32(pgt, pte + 4, 0x00000000);
+ pte += 8;
+ }
+}
+
+void
+nvc0_vm_flush(struct nouveau_vm *vm)
+{
+ struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct drm_device *dev = vm->dev;
+ struct nouveau_vm_pgd *vpgd;
+ unsigned long flags;
+ u32 engine;
+
+ engine = 1;
+ if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
+ engine |= 4;
+
+ pinstmem->flush(vm->dev);
+
+ spin_lock_irqsave(&dev_priv->vm_lock, flags);
+ list_for_each_entry(vpgd, &vm->pgd_list, head) {
+ /* looks like maybe a "free flush slots" counter, the
+ * faster you write to 0x100cbc the more it decreases
+ */
+ if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
+ NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
+ nv_rd32(dev, 0x100c80), engine);
+ }
+ nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
+ nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
+ /* wait for flush to be queued? */
+ if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
+ NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
+ nv_rd32(dev, 0x100c80), engine);
+ }
+ }
+ spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
+}
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_abi16.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_software.h"
int
#include "nouveau_drv.h"
#include "nouveau_hw.h"
#include "nouveau_encoder.h"
-#include "nouveau_gpio.h"
+#include <subdev/gpio.h>
#include <linux/io-mapping.h>
#include <linux/firmware.h>
#define __NOUVEAU_BIOS_H__
#include "nvreg.h"
-#include "nouveau_i2c.h"
+#include <subdev/i2c.h>
#define DCB_MAX_NUM_ENTRIES 16
#define DCB_MAX_NUM_I2C_ENTRIES 16
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_mm.h"
-#include "nouveau_vm.h"
+#include <core/mm.h>
+#include <subdev/vm.h>
#include "nouveau_fence.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_dma.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
#include "nouveau_fence.h"
#include "nouveau_software.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_connector.h"
-#include "nouveau_gpio.h"
+#include <subdev/gpio.h>
#include "nouveau_hw.h"
static void nouveau_connector_hotplug(void *, int);
#define __NOUVEAU_CONNECTOR_H__
#include "drm_edid.h"
-#include "nouveau_i2c.h"
+#include <subdev/i2c.h>
enum nouveau_underscan_type {
UNDERSCAN_OFF,
#include "nouveau_dma.h"
#include "nouveau_connector.h"
#include "nouveau_software.h"
-#include "nouveau_gpio.h"
+#include <subdev/gpio.h>
#include "nouveau_fence.h"
#include "nv50_display.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
void
nouveau_dma_init(struct nouveau_channel *chan)
#include "drmP.h"
#include "nouveau_drv.h"
-#include "nouveau_i2c.h"
+#include <subdev/i2c.h>
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
-#include "nouveau_gpio.h"
+#include <subdev/gpio.h>
/******************************************************************************
* aux channel util functions
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "nouveau_pm.h"
-#include "nouveau_fifo.h"
+#include <engine/fifo.h>
#include "nv50_display.h"
#include "drm_pciids.h"
#include "nouveau_drm.h"
#include "nouveau_reg.h"
-#include "nouveau_bios.h"
+#include <nouveau_bios.h>
#include "nouveau_util.h"
struct nouveau_grctx;
struct nouveau_mem;
-#include "nouveau_vm.h"
+#include <subdev/vm.h>
#define MAX_NUM_DCB_ENTRIES 16
return !(0x0baf & (1 << (dev_priv->chipset & 0x0f)));
}
+int nv50_vram_init(struct drm_device *);
+void nv50_vram_fini(struct drm_device *);
+int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
+ u32 memtype, struct nouveau_mem **);
+void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
+bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
+int nvc0_vram_init(struct drm_device *);
+int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
+ u32 memtype, struct nouveau_mem **);
+bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
+
/* memory type/access flags, do not match hardware values */
#define NV_MEM_ACCESS_RO 1
#define NV_MEM_ACCESS_WO 2
#include <linux/hrtimer.h>
#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_fence.h"
#include "nouveau_software.h"
#include "nouveau_dma.h"
+++ /dev/null
-#ifndef __NOUVEAU_FIFO_H__
-#define __NOUVEAU_FIFO_H__
-
-struct nouveau_fifo_priv {
- struct nouveau_exec_engine base;
- u32 channels;
-};
-
-struct nouveau_fifo_chan {
-};
-
-bool nv04_fifo_cache_pull(struct drm_device *, bool);
-void nv04_fifo_context_del(struct nouveau_channel *, int);
-int nv04_fifo_fini(struct drm_device *, int, bool);
-int nv04_fifo_init(struct drm_device *, int);
-void nv04_fifo_isr(struct drm_device *);
-void nv04_fifo_destroy(struct drm_device *, int);
-
-void nv50_fifo_playlist_update(struct drm_device *);
-void nv50_fifo_destroy(struct drm_device *, int);
-void nv50_fifo_tlb_flush(struct drm_device *, int);
-
-int nv04_fifo_create(struct drm_device *);
-int nv10_fifo_create(struct drm_device *);
-int nv17_fifo_create(struct drm_device *);
-int nv40_fifo_create(struct drm_device *);
-int nv50_fifo_create(struct drm_device *);
-int nv84_fifo_create(struct drm_device *);
-int nvc0_fifo_create(struct drm_device *);
-int nve0_fifo_create(struct drm_device *);
-
-#endif
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_i2c.h"
-#include "nouveau_gpio.h"
-
-static u8 *
-dcb_gpio_table(struct drm_device *dev)
-{
- u8 *dcb = dcb_table(dev);
- if (dcb) {
- if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
- return ROMPTR(dev, dcb[0x0a]);
- if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
- return ROMPTR(dev, dcb[-15]);
- }
- return NULL;
-}
-
-static u8 *
-dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
-{
- u8 *table = dcb_gpio_table(dev);
- if (table) {
- *version = table[0];
- if (*version < 0x30 && ent < table[2])
- return table + 3 + (ent * table[1]);
- else if (ent < table[2])
- return table + table[1] + (ent * table[3]);
- }
- return NULL;
-}
-
-int
-nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
-}
-
-int
-nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
-}
-
-int
-nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
- struct gpio_func *gpio)
-{
- u8 *table, *entry, version;
- int i = -1;
-
- if (line == 0xff && func == 0xff)
- return -EINVAL;
-
- while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
- if (version < 0x40) {
- u16 data = ROM16(entry[0]);
- *gpio = (struct gpio_func) {
- .line = (data & 0x001f) >> 0,
- .func = (data & 0x07e0) >> 5,
- .log[0] = (data & 0x1800) >> 11,
- .log[1] = (data & 0x6000) >> 13,
- };
- } else
- if (version < 0x41) {
- *gpio = (struct gpio_func) {
- .line = entry[0] & 0x1f,
- .func = entry[1],
- .log[0] = (entry[3] & 0x18) >> 3,
- .log[1] = (entry[3] & 0x60) >> 5,
- };
- } else {
- *gpio = (struct gpio_func) {
- .line = entry[0] & 0x3f,
- .func = entry[1],
- .log[0] = (entry[4] & 0x30) >> 4,
- .log[1] = (entry[4] & 0xc0) >> 6,
- };
- }
-
- if ((line == 0xff || line == gpio->line) &&
- (func == 0xff || func == gpio->func))
- return 0;
- }
-
- /* DCB 2.2, fixed TVDAC GPIO data */
- if ((table = dcb_table(dev)) && table[0] >= 0x22) {
- if (func == DCB_GPIO_TVDAC0) {
- *gpio = (struct gpio_func) {
- .func = DCB_GPIO_TVDAC0,
- .line = table[-4] >> 4,
- .log[0] = !!(table[-5] & 2),
- .log[1] = !(table[-5] & 2),
- };
- return 0;
- }
- }
-
- /* Apple iMac G4 NV18 */
- if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
- if (func == DCB_GPIO_TVDAC0) {
- *gpio = (struct gpio_func) {
- .func = DCB_GPIO_TVDAC0,
- .line = 4,
- .log[0] = 0,
- .log[1] = 1,
- };
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-int
-nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
-{
- struct gpio_func gpio;
- int ret;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
- if (ret == 0) {
- int dir = !!(gpio.log[state] & 0x02);
- int out = !!(gpio.log[state] & 0x01);
- ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
- }
-
- return ret;
-}
-
-int
-nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
-{
- struct gpio_func gpio;
- int ret;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
- if (ret == 0) {
- ret = nouveau_gpio_sense(dev, idx, gpio.line);
- if (ret >= 0)
- ret = (ret == (gpio.log[1] & 1));
- }
-
- return ret;
-}
-
-int
-nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct gpio_func gpio;
- int ret;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
- if (ret == 0) {
- if (idx == 0 && pgpio->irq_enable)
- pgpio->irq_enable(dev, gpio.line, on);
- else
- ret = -ENODEV;
- }
-
- return ret;
-}
-
-struct gpio_isr {
- struct drm_device *dev;
- struct list_head head;
- struct work_struct work;
- int idx;
- struct gpio_func func;
- void (*handler)(void *, int);
- void *data;
- bool inhibit;
-};
-
-static void
-nouveau_gpio_isr_bh(struct work_struct *work)
-{
- struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
- struct drm_device *dev = isr->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- unsigned long flags;
- int state;
-
- state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
- if (state >= 0)
- isr->handler(isr->data, state);
-
- spin_lock_irqsave(&pgpio->lock, flags);
- isr->inhibit = false;
- spin_unlock_irqrestore(&pgpio->lock, flags);
-}
-
-void
-nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct gpio_isr *isr;
-
- if (idx != 0)
- return;
-
- spin_lock(&pgpio->lock);
- list_for_each_entry(isr, &pgpio->isr, head) {
- if (line_mask & (1 << isr->func.line)) {
- if (isr->inhibit)
- continue;
- isr->inhibit = true;
- schedule_work(&isr->work);
- }
- }
- spin_unlock(&pgpio->lock);
-}
-
-int
-nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct gpio_isr *isr;
- unsigned long flags;
- int ret;
-
- isr = kzalloc(sizeof(*isr), GFP_KERNEL);
- if (!isr)
- return -ENOMEM;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
- if (ret) {
- kfree(isr);
- return ret;
- }
-
- INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
- isr->dev = dev;
- isr->handler = handler;
- isr->data = data;
- isr->idx = idx;
-
- spin_lock_irqsave(&pgpio->lock, flags);
- list_add(&isr->head, &pgpio->isr);
- spin_unlock_irqrestore(&pgpio->lock, flags);
- return 0;
-}
-
-void
-nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
- void (*handler)(void *, int), void *data)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- struct gpio_isr *isr, *tmp;
- struct gpio_func func;
- unsigned long flags;
- LIST_HEAD(tofree);
- int ret;
-
- ret = nouveau_gpio_find(dev, idx, tag, line, &func);
- if (ret == 0) {
- spin_lock_irqsave(&pgpio->lock, flags);
- list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
- if (memcmp(&isr->func, &func, sizeof(func)) ||
- isr->idx != idx ||
- isr->handler != handler || isr->data != data)
- continue;
- list_move(&isr->head, &tofree);
- }
- spin_unlock_irqrestore(&pgpio->lock, flags);
-
- list_for_each_entry_safe(isr, tmp, &tofree, head) {
- flush_work_sync(&isr->work);
- kfree(isr);
- }
- }
-}
-
-int
-nouveau_gpio_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- INIT_LIST_HEAD(&pgpio->isr);
- spin_lock_init(&pgpio->lock);
-
- return nouveau_gpio_init(dev);
-}
-
-void
-nouveau_gpio_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- nouveau_gpio_fini(dev);
- BUG_ON(!list_empty(&pgpio->isr));
-}
-
-int
-nouveau_gpio_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
- int ret = 0;
-
- if (pgpio->init)
- ret = pgpio->init(dev);
-
- return ret;
-}
-
-void
-nouveau_gpio_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-
- if (pgpio->fini)
- pgpio->fini(dev);
-}
-
-void
-nouveau_gpio_reset(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u8 *entry, version;
- int ent = -1;
-
- while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
- u8 func = 0xff, line, defs, unk0, unk1;
- if (version >= 0x41) {
- defs = !!(entry[0] & 0x80);
- line = entry[0] & 0x3f;
- func = entry[1];
- unk0 = entry[2];
- unk1 = entry[3] & 0x1f;
- } else
- if (version >= 0x40) {
- line = entry[0] & 0x1f;
- func = entry[1];
- defs = !!(entry[3] & 0x01);
- unk0 = !!(entry[3] & 0x02);
- unk1 = !!(entry[3] & 0x04);
- } else {
- break;
- }
-
- if (func == 0xff)
- continue;
-
- nouveau_gpio_func_set(dev, func, defs);
-
- if (dev_priv->card_type >= NV_D0) {
- nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
- if (unk1--)
- nv_mask(dev, 0x00d740 + (unk1 * 4), 0xff, line);
- } else
- if (dev_priv->card_type >= NV_50) {
- static const u32 regs[] = { 0xe100, 0xe28c };
- u32 val = (unk1 << 16) | unk0;
- u32 reg = regs[line >> 4]; line &= 0x0f;
-
- nv_mask(dev, reg, 0x00010001 << line, val << line);
- }
- }
-}
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NOUVEAU_GPIO_H__
-#define __NOUVEAU_GPIO_H__
-
-struct gpio_func {
- u8 func;
- u8 line;
- u8 log[2];
-};
-
-/* nouveau_gpio.c */
-int nouveau_gpio_create(struct drm_device *);
-void nouveau_gpio_destroy(struct drm_device *);
-int nouveau_gpio_init(struct drm_device *);
-void nouveau_gpio_fini(struct drm_device *);
-void nouveau_gpio_reset(struct drm_device *);
-int nouveau_gpio_drive(struct drm_device *, int idx, int line,
- int dir, int out);
-int nouveau_gpio_sense(struct drm_device *, int idx, int line);
-int nouveau_gpio_find(struct drm_device *, int idx, u8 tag, u8 line,
- struct gpio_func *);
-int nouveau_gpio_set(struct drm_device *, int idx, u8 tag, u8 line, int state);
-int nouveau_gpio_get(struct drm_device *, int idx, u8 tag, u8 line);
-int nouveau_gpio_irq(struct drm_device *, int idx, u8 tag, u8 line, bool on);
-void nouveau_gpio_isr(struct drm_device *, int idx, u32 mask);
-int nouveau_gpio_isr_add(struct drm_device *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
-void nouveau_gpio_isr_del(struct drm_device *, int idx, u8 tag, u8 line,
- void (*)(void *, int state), void *data);
-
-static inline bool
-nouveau_gpio_func_valid(struct drm_device *dev, u8 tag)
-{
- struct gpio_func func;
- return (nouveau_gpio_find(dev, 0, tag, 0xff, &func)) == 0;
-}
-
-static inline int
-nouveau_gpio_func_set(struct drm_device *dev, u8 tag, int state)
-{
- return nouveau_gpio_set(dev, 0, tag, 0xff, state);
-}
-
-static inline int
-nouveau_gpio_func_get(struct drm_device *dev, u8 tag)
-{
- return nouveau_gpio_get(dev, 0, tag, 0xff);
-}
-
-#endif
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
#include "nouveau_software.h"
-#include "nouveau_vm.h"
+#include <subdev/vm.h>
struct nouveau_gpuobj_method {
struct list_head head;
+++ /dev/null
-#ifndef __NOUVEAU_GRCTX_H__
-#define __NOUVEAU_GRCTX_H__
-
-struct nouveau_grctx {
- struct drm_device *dev;
-
- enum {
- NOUVEAU_GRCTX_PROG,
- NOUVEAU_GRCTX_VALS
- } mode;
- void *data;
-
- uint32_t ctxprog_max;
- uint32_t ctxprog_len;
- uint32_t ctxprog_reg;
- int ctxprog_label[32];
- uint32_t ctxvals_pos;
- uint32_t ctxvals_base;
-};
-
-static inline void
-cp_out(struct nouveau_grctx *ctx, uint32_t inst)
-{
- uint32_t *ctxprog = ctx->data;
-
- if (ctx->mode != NOUVEAU_GRCTX_PROG)
- return;
-
- BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
- ctxprog[ctx->ctxprog_len++] = inst;
-}
-
-static inline void
-cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
-{
- cp_out(ctx, CP_LOAD_SR | val);
-}
-
-static inline void
-cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
-{
- ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
-
- ctx->ctxvals_base = ctx->ctxvals_pos;
- ctx->ctxvals_pos = ctx->ctxvals_base + length;
-
- if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
- cp_lsr(ctx, length);
- length = 0;
- }
-
- cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
-}
-
-static inline void
-cp_name(struct nouveau_grctx *ctx, int name)
-{
- uint32_t *ctxprog = ctx->data;
- int i;
-
- if (ctx->mode != NOUVEAU_GRCTX_PROG)
- return;
-
- ctx->ctxprog_label[name] = ctx->ctxprog_len;
- for (i = 0; i < ctx->ctxprog_len; i++) {
- if ((ctxprog[i] & 0xfff00000) != 0xff400000)
- continue;
- if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
- continue;
- ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
- (ctx->ctxprog_len << CP_BRA_IP_SHIFT);
- }
-}
-
-static inline void
-_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
-{
- int ip = 0;
-
- if (mod != 2) {
- ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
- if (ip == 0)
- ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
- }
-
- cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
- (state ? 0 : CP_BRA_IF_CLEAR));
-}
-#define cp_bra(c, f, s, n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
-#define cp_cal(c, f, s, n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
-#define cp_ret(c, f, s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
-
-static inline void
-_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
-{
- cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
-}
-#define cp_wait(c, f, s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
-
-static inline void
-_cp_set(struct nouveau_grctx *ctx, int flag, int state)
-{
- cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
-}
-#define cp_set(c, f, s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
-
-static inline void
-cp_pos(struct nouveau_grctx *ctx, int offset)
-{
- ctx->ctxvals_pos = offset;
- ctx->ctxvals_base = ctx->ctxvals_pos;
-
- cp_lsr(ctx, ctx->ctxvals_pos);
- cp_out(ctx, CP_SET_CONTEXT_POINTER);
-}
-
-static inline void
-gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
-{
- if (ctx->mode != NOUVEAU_GRCTX_VALS)
- return;
-
- reg = (reg - 0x00400000) / 4;
- reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
-
- nv_wo32(ctx->data, reg * 4, val);
-}
-
-#endif
+++ /dev/null
-/*
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/module.h>
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_i2c.h"
-#include "nouveau_hw.h"
-
-static void
-i2c_drive_scl(void *data, int state)
-{
- struct nouveau_i2c_chan *port = data;
- if (port->type == 0) {
- u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
- if (state) val |= 0x20;
- else val &= 0xdf;
- NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
- } else
- if (port->type == 4) {
- nv_mask(port->dev, port->drive, 0x2f, state ? 0x21 : 0x01);
- } else
- if (port->type == 5) {
- if (state) port->state |= 0x01;
- else port->state &= 0xfe;
- nv_wr32(port->dev, port->drive, 4 | port->state);
- }
-}
-
-static void
-i2c_drive_sda(void *data, int state)
-{
- struct nouveau_i2c_chan *port = data;
- if (port->type == 0) {
- u8 val = NVReadVgaCrtc(port->dev, 0, port->drive);
- if (state) val |= 0x10;
- else val &= 0xef;
- NVWriteVgaCrtc(port->dev, 0, port->drive, val | 0x01);
- } else
- if (port->type == 4) {
- nv_mask(port->dev, port->drive, 0x1f, state ? 0x11 : 0x01);
- } else
- if (port->type == 5) {
- if (state) port->state |= 0x02;
- else port->state &= 0xfd;
- nv_wr32(port->dev, port->drive, 4 | port->state);
- }
-}
-
-static int
-i2c_sense_scl(void *data)
-{
- struct nouveau_i2c_chan *port = data;
- struct drm_nouveau_private *dev_priv = port->dev->dev_private;
- if (port->type == 0) {
- return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x04);
- } else
- if (port->type == 4) {
- return !!(nv_rd32(port->dev, port->sense) & 0x00040000);
- } else
- if (port->type == 5) {
- if (dev_priv->card_type < NV_D0)
- return !!(nv_rd32(port->dev, port->sense) & 0x01);
- else
- return !!(nv_rd32(port->dev, port->sense) & 0x10);
- }
- return 0;
-}
-
-static int
-i2c_sense_sda(void *data)
-{
- struct nouveau_i2c_chan *port = data;
- struct drm_nouveau_private *dev_priv = port->dev->dev_private;
- if (port->type == 0) {
- return !!(NVReadVgaCrtc(port->dev, 0, port->sense) & 0x08);
- } else
- if (port->type == 4) {
- return !!(nv_rd32(port->dev, port->sense) & 0x00080000);
- } else
- if (port->type == 5) {
- if (dev_priv->card_type < NV_D0)
- return !!(nv_rd32(port->dev, port->sense) & 0x02);
- else
- return !!(nv_rd32(port->dev, port->sense) & 0x20);
- }
- return 0;
-}
-
-static const uint32_t nv50_i2c_port[] = {
- 0x00e138, 0x00e150, 0x00e168, 0x00e180,
- 0x00e254, 0x00e274, 0x00e764, 0x00e780,
- 0x00e79c, 0x00e7b8
-};
-
-static u8 *
-i2c_table(struct drm_device *dev, u8 *version)
-{
- u8 *dcb = dcb_table(dev), *i2c = NULL;
- if (dcb) {
- if (dcb[0] >= 0x15)
- i2c = ROMPTR(dev, dcb[2]);
- if (dcb[0] >= 0x30)
- i2c = ROMPTR(dev, dcb[4]);
- }
-
- /* early revisions had no version number; use the dcb version */
- if (i2c) {
- *version = dcb[0];
- if (*version >= 0x30)
- *version = i2c[0];
- }
-
- return i2c;
-}
-
-int
-nouveau_i2c_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->vbios;
- struct nouveau_i2c_chan *port;
- u8 version = 0x00, entries, recordlen;
- u8 *i2c, *entry, legacy[2][4] = {};
- int ret, i;
-
- INIT_LIST_HEAD(&dev_priv->i2c_ports);
-
- i2c = i2c_table(dev, &version);
- if (!i2c) {
- u8 *bmp = &bios->data[bios->offset];
- if (bios->type != NVBIOS_BMP)
- return -ENODEV;
-
- legacy[0][0] = NV_CIO_CRE_DDC_WR__INDEX;
- legacy[0][1] = NV_CIO_CRE_DDC_STATUS__INDEX;
- legacy[1][0] = NV_CIO_CRE_DDC0_WR__INDEX;
- legacy[1][1] = NV_CIO_CRE_DDC0_STATUS__INDEX;
-
- /* BMP (from v4.0) has i2c info in the structure; it's at a
- * fixed location in earlier VBIOS versions
- */
- if (bmp[5] < 4)
- i2c = &bios->data[0x48];
- else
- i2c = &bmp[0x36];
-
- if (i2c[4]) legacy[0][0] = i2c[4];
- if (i2c[5]) legacy[0][1] = i2c[5];
- if (i2c[6]) legacy[1][0] = i2c[6];
- if (i2c[7]) legacy[1][1] = i2c[7];
- }
-
- if (version >= 0x30) {
- entry = i2c[1] + i2c;
- entries = i2c[2];
- recordlen = i2c[3];
- } else
- if (version) {
- entry = i2c;
- entries = 16;
- recordlen = 4;
- } else {
- entry = legacy[0];
- entries = 2;
- recordlen = 4;
- }
-
- for (i = 0; i < entries; i++, entry += recordlen) {
- port = kzalloc(sizeof(*port), GFP_KERNEL);
- if (port == NULL) {
- nouveau_i2c_fini(dev);
- return -ENOMEM;
- }
-
- port->type = entry[3];
- if (version < 0x30) {
- port->type &= 0x07;
- if (port->type == 0x07)
- port->type = 0xff;
- }
-
- if (port->type == 0xff) {
- kfree(port);
- continue;
- }
-
- switch (port->type) {
- case 0: /* NV04:NV50 */
- port->drive = entry[0];
- port->sense = entry[1];
- break;
- case 4: /* NV4E */
- port->drive = 0x600800 + entry[1];
- port->sense = port->drive;
- break;
- case 5: /* NV50- */
- port->drive = entry[0] & 0x0f;
- if (dev_priv->card_type < NV_D0) {
- if (port->drive >= ARRAY_SIZE(nv50_i2c_port))
- break;
- port->drive = nv50_i2c_port[port->drive];
- port->sense = port->drive;
- } else {
- port->drive = 0x00d014 + (port->drive * 0x20);
- port->sense = port->drive;
- }
- break;
- case 6: /* NV50- DP AUX */
- port->drive = entry[0] & 0x0f;
- port->sense = port->drive;
- port->adapter.algo = &nouveau_dp_i2c_algo;
- break;
- default:
- break;
- }
-
- if (!port->adapter.algo && !port->drive) {
- NV_ERROR(dev, "I2C%d: type %d index %x/%x unknown\n",
- i, port->type, port->drive, port->sense);
- kfree(port);
- continue;
- }
-
- snprintf(port->adapter.name, sizeof(port->adapter.name),
- "nouveau-%s-%d", pci_name(dev->pdev), i);
- port->adapter.owner = THIS_MODULE;
- port->adapter.dev.parent = &dev->pdev->dev;
- port->dev = dev;
- port->index = i;
- port->dcb = ROM32(entry[0]);
- i2c_set_adapdata(&port->adapter, i2c);
-
- if (port->adapter.algo != &nouveau_dp_i2c_algo) {
- port->adapter.algo_data = &port->bit;
- port->bit.udelay = 10;
- port->bit.timeout = usecs_to_jiffies(2200);
- port->bit.data = port;
- port->bit.setsda = i2c_drive_sda;
- port->bit.setscl = i2c_drive_scl;
- port->bit.getsda = i2c_sense_sda;
- port->bit.getscl = i2c_sense_scl;
-
- i2c_drive_scl(port, 0);
- i2c_drive_sda(port, 1);
- i2c_drive_scl(port, 1);
-
- ret = i2c_bit_add_bus(&port->adapter);
- } else {
- port->adapter.algo = &nouveau_dp_i2c_algo;
- ret = i2c_add_adapter(&port->adapter);
- }
-
- if (ret) {
- NV_ERROR(dev, "I2C%d: failed register: %d\n", i, ret);
- kfree(port);
- continue;
- }
-
- list_add_tail(&port->head, &dev_priv->i2c_ports);
- }
-
- return 0;
-}
-
-void
-nouveau_i2c_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *port, *tmp;
-
- list_for_each_entry_safe(port, tmp, &dev_priv->i2c_ports, head) {
- i2c_del_adapter(&port->adapter);
- kfree(port);
- }
-}
-
-struct nouveau_i2c_chan *
-nouveau_i2c_find(struct drm_device *dev, u8 index)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_i2c_chan *port;
-
- if (index == NV_I2C_DEFAULT(0) ||
- index == NV_I2C_DEFAULT(1)) {
- u8 version, *i2c = i2c_table(dev, &version);
- if (i2c && version >= 0x30) {
- if (index == NV_I2C_DEFAULT(0))
- index = (i2c[4] & 0x0f);
- else
- index = (i2c[4] & 0xf0) >> 4;
- } else {
- index = 2;
- }
- }
-
- list_for_each_entry(port, &dev_priv->i2c_ports, head) {
- if (port->index == index)
- break;
- }
-
- if (&port->head == &dev_priv->i2c_ports)
- return NULL;
-
- if (dev_priv->card_type >= NV_50 && (port->dcb & 0x00000100)) {
- u32 reg = 0x00e500, val;
- if (port->type == 6) {
- reg += port->drive * 0x50;
- val = 0x2002;
- } else {
- reg += ((port->dcb & 0x1e00) >> 9) * 0x50;
- val = 0xe001;
- }
-
- /* nfi, but neither auxch nor i2c works if it's 1 */
- nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000);
- /* nfi, but switches auxch vs normal i2c */
- nv_mask(dev, reg + 0x00, 0x0000f003, val);
- }
-
- return port;
-}
-
-bool
-nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
-{
- uint8_t buf[] = { 0 };
- struct i2c_msg msgs[] = {
- {
- .addr = addr,
- .flags = 0,
- .len = 1,
- .buf = buf,
- },
- {
- .addr = addr,
- .flags = I2C_M_RD,
- .len = 1,
- .buf = buf,
- }
- };
-
- return i2c_transfer(&i2c->adapter, msgs, 2) == 2;
-}
-
-int
-nouveau_i2c_identify(struct drm_device *dev, const char *what,
- struct i2c_board_info *info,
- bool (*match)(struct nouveau_i2c_chan *,
- struct i2c_board_info *),
- int index)
-{
- struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
- int i;
-
- if (!i2c) {
- NV_DEBUG(dev, "No bus when probing %s on %d\n", what, index);
- return -ENODEV;
- }
-
- NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, i2c->index);
- for (i = 0; info[i].addr; i++) {
- if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
- (!match || match(i2c, &info[i]))) {
- NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
- return i;
- }
- }
-
- NV_DEBUG(dev, "No devices found.\n");
- return -ENODEV;
-}
+++ /dev/null
-/*
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef __NOUVEAU_I2C_H__
-#define __NOUVEAU_I2C_H__
-
-#include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
-#include "drm_dp_helper.h"
-
-#define NV_I2C_PORT(n) (0x00 + (n))
-#define NV_I2C_PORT_NUM 0x10
-#define NV_I2C_DEFAULT(n) (0x80 + (n))
-
-struct nouveau_i2c_chan {
- struct i2c_adapter adapter;
- struct drm_device *dev;
- struct i2c_algo_bit_data bit;
- struct list_head head;
- u8 index;
- u8 type;
- u32 dcb;
- u32 drive;
- u32 sense;
- u32 state;
-};
-
-int nouveau_i2c_init(struct drm_device *);
-void nouveau_i2c_fini(struct drm_device *);
-struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, u8 index);
-bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
-int nouveau_i2c_identify(struct drm_device *dev, const char *what,
- struct i2c_board_info *info,
- bool (*match)(struct nouveau_i2c_chan *,
- struct i2c_board_info *),
- int index);
-
-extern const struct i2c_algorithm nouveau_dp_i2c_algo;
-
-#endif /* __NOUVEAU_I2C_H__ */
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_util.h"
void
#include "nouveau_drv.h"
#include "nouveau_pm.h"
-#include "nouveau_mm.h"
-#include "nouveau_vm.h"
-#include "nouveau_fifo.h"
+#include <core/mm.h>
+#include <subdev/vm.h>
+#include <engine/fifo.h>
#include "nouveau_fence.h"
/*
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-
-static inline void
-region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
-{
- list_del(&a->nl_entry);
- list_del(&a->fl_entry);
- kfree(a);
-}
-
-static struct nouveau_mm_node *
-region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
-{
- struct nouveau_mm_node *b;
-
- if (a->length == size)
- return a;
-
- b = kmalloc(sizeof(*b), GFP_KERNEL);
- if (unlikely(b == NULL))
- return NULL;
-
- b->offset = a->offset;
- b->length = size;
- b->type = a->type;
- a->offset += size;
- a->length -= size;
- list_add_tail(&b->nl_entry, &a->nl_entry);
- if (b->type == 0)
- list_add_tail(&b->fl_entry, &a->fl_entry);
- return b;
-}
-
-#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
- list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
-
-void
-nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
-{
- struct nouveau_mm_node *prev = node(this, prev);
- struct nouveau_mm_node *next = node(this, next);
-
- list_add(&this->fl_entry, &mm->free);
- this->type = 0;
-
- if (prev && prev->type == 0) {
- prev->length += this->length;
- region_put(mm, this);
- this = prev;
- }
-
- if (next && next->type == 0) {
- next->offset = this->offset;
- next->length += this->length;
- region_put(mm, this);
- }
-}
-
-int
-nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
- u32 align, struct nouveau_mm_node **pnode)
-{
- struct nouveau_mm_node *prev, *this, *next;
- u32 min = size_nc ? size_nc : size;
- u32 align_mask = align - 1;
- u32 splitoff;
- u32 s, e;
-
- list_for_each_entry(this, &mm->free, fl_entry) {
- e = this->offset + this->length;
- s = this->offset;
-
- prev = node(this, prev);
- if (prev && prev->type != type)
- s = roundup(s, mm->block_size);
-
- next = node(this, next);
- if (next && next->type != type)
- e = rounddown(e, mm->block_size);
-
- s = (s + align_mask) & ~align_mask;
- e &= ~align_mask;
- if (s > e || e - s < min)
- continue;
-
- splitoff = s - this->offset;
- if (splitoff && !region_split(mm, this, splitoff))
- return -ENOMEM;
-
- this = region_split(mm, this, min(size, e - s));
- if (!this)
- return -ENOMEM;
-
- this->type = type;
- list_del(&this->fl_entry);
- *pnode = this;
- return 0;
- }
-
- return -ENOSPC;
-}
-
-int
-nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
-{
- struct nouveau_mm_node *node;
-
- if (block) {
- mutex_init(&mm->mutex);
- INIT_LIST_HEAD(&mm->nodes);
- INIT_LIST_HEAD(&mm->free);
- mm->block_size = block;
- mm->heap_nodes = 0;
- }
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
- node->offset = roundup(offset, mm->block_size);
- node->length = rounddown(offset + length, mm->block_size) - node->offset;
-
- list_add_tail(&node->nl_entry, &mm->nodes);
- list_add_tail(&node->fl_entry, &mm->free);
- mm->heap_nodes++;
- return 0;
-}
-
-int
-nouveau_mm_fini(struct nouveau_mm *mm)
-{
- struct nouveau_mm_node *node, *heap =
- list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
- int nodes = 0;
-
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- if (nodes++ == mm->heap_nodes) {
- printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
- list_for_each_entry(node, &mm->nodes, nl_entry) {
- printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
- node->type, node->offset, node->length);
- }
- WARN_ON(1);
- return -EBUSY;
- }
- }
-
- kfree(heap);
- return 0;
-}
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_REGION_H__
-#define __NOUVEAU_REGION_H__
-
-struct nouveau_mm_node {
- struct list_head nl_entry;
- struct list_head fl_entry;
- struct list_head rl_entry;
-
- u8 type;
- u32 offset;
- u32 length;
-};
-
-struct nouveau_mm {
- struct list_head nodes;
- struct list_head free;
-
- struct mutex mutex;
-
- u32 block_size;
- int heap_nodes;
-};
-
-int nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
-int nouveau_mm_fini(struct nouveau_mm *);
-int nouveau_mm_pre(struct nouveau_mm *);
-int nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
- u32 align, struct nouveau_mm_node **);
-void nouveau_mm_put(struct nouveau_mm *, struct nouveau_mm_node *);
-
-int nv50_vram_init(struct drm_device *);
-void nv50_vram_fini(struct drm_device *);
-int nv50_vram_new(struct drm_device *, u64 size, u32 align, u32 size_nc,
- u32 memtype, struct nouveau_mem **);
-void nv50_vram_del(struct drm_device *, struct nouveau_mem **);
-bool nv50_vram_flags_valid(struct drm_device *, u32 tile_flags);
-
-int nvc0_vram_init(struct drm_device *);
-int nvc0_vram_new(struct drm_device *, u64 size, u32 align, u32 ncmin,
- u32 memtype, struct nouveau_mem **);
-bool nvc0_vram_flags_valid(struct drm_device *, u32 tile_flags);
-
-#endif
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
int
nouveau_notifier_init_channel(struct nouveau_channel *chan)
#include "nouveau_drv.h"
#include "nouveau_pm.h"
-#include "nouveau_gpio.h"
+#include <subdev/gpio.h>
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-
-static u32
-nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_ramht *ramht = chan->ramht;
- u32 hash = 0;
- int i;
-
- NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
-
- for (i = 32; i > 0; i -= ramht->bits) {
- hash ^= (handle & ((1 << ramht->bits) - 1));
- handle >>= ramht->bits;
- }
-
- if (dev_priv->card_type < NV_50)
- hash ^= chan->id << (ramht->bits - 4);
- hash <<= 3;
-
- NV_DEBUG(dev, "hash=0x%08x\n", hash);
- return hash;
-}
-
-static int
-nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
- u32 offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 ctx = nv_ro32(ramht, offset + 4);
-
- if (dev_priv->card_type < NV_40)
- return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
- return (ctx != 0);
-}
-
-static int
-nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
- struct nouveau_gpuobj *ramht, u32 offset)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- u32 ctx = nv_ro32(ramht, offset + 4);
-
- if (dev_priv->card_type >= NV_50)
- return true;
- else if (dev_priv->card_type >= NV_40)
- return chan->id ==
- ((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
- else
- return chan->id ==
- ((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
-}
-
-int
-nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
- struct nouveau_gpuobj *gpuobj)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
- struct nouveau_ramht_entry *entry;
- struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
- unsigned long flags;
- u32 ctx, co, ho;
-
- if (nouveau_ramht_find(chan, handle))
- return -EEXIST;
-
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- return -ENOMEM;
- entry->channel = chan;
- entry->gpuobj = NULL;
- entry->handle = handle;
- nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
-
- if (dev_priv->card_type < NV_40) {
- ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->pinst >> 4) |
- (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
- } else
- if (dev_priv->card_type < NV_50) {
- ctx = (gpuobj->pinst >> 4) |
- (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
- } else {
- if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
- ctx = (gpuobj->cinst << 10) |
- (chan->id << 28) |
- chan->id; /* HASH_TAG */
- } else {
- ctx = (gpuobj->cinst >> 4) |
- ((gpuobj->engine <<
- NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
- }
- }
-
- spin_lock_irqsave(&chan->ramht->lock, flags);
- list_add(&entry->head, &chan->ramht->entries);
-
- co = ho = nouveau_ramht_hash_handle(chan, handle);
- do {
- if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
- NV_DEBUG(dev,
- "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- chan->id, co, handle, ctx);
- nv_wo32(ramht, co + 0, handle);
- nv_wo32(ramht, co + 4, ctx);
-
- spin_unlock_irqrestore(&chan->ramht->lock, flags);
- instmem->flush(dev);
- return 0;
- }
- NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
- chan->id, co, nv_ro32(ramht, co));
-
- co += 8;
- if (co >= ramht->size)
- co = 0;
- } while (co != ho);
-
- NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
- list_del(&entry->head);
- spin_unlock_irqrestore(&chan->ramht->lock, flags);
- kfree(entry);
- return -ENOMEM;
-}
-
-static struct nouveau_ramht_entry *
-nouveau_ramht_remove_entry(struct nouveau_channel *chan, u32 handle)
-{
- struct nouveau_ramht *ramht = chan ? chan->ramht : NULL;
- struct nouveau_ramht_entry *entry;
- unsigned long flags;
-
- if (!ramht)
- return NULL;
-
- spin_lock_irqsave(&ramht->lock, flags);
- list_for_each_entry(entry, &ramht->entries, head) {
- if (entry->channel == chan &&
- (!handle || entry->handle == handle)) {
- list_del(&entry->head);
- spin_unlock_irqrestore(&ramht->lock, flags);
-
- return entry;
- }
- }
- spin_unlock_irqrestore(&ramht->lock, flags);
-
- return NULL;
-}
-
-static void
-nouveau_ramht_remove_hash(struct nouveau_channel *chan, u32 handle)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
- struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
- unsigned long flags;
- u32 co, ho;
-
- spin_lock_irqsave(&chan->ramht->lock, flags);
- co = ho = nouveau_ramht_hash_handle(chan, handle);
- do {
- if (nouveau_ramht_entry_valid(dev, ramht, co) &&
- nouveau_ramht_entry_same_channel(chan, ramht, co) &&
- (handle == nv_ro32(ramht, co))) {
- NV_DEBUG(dev,
- "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- chan->id, co, handle, nv_ro32(ramht, co + 4));
- nv_wo32(ramht, co + 0, 0x00000000);
- nv_wo32(ramht, co + 4, 0x00000000);
- instmem->flush(dev);
- goto out;
- }
-
- co += 8;
- if (co >= ramht->size)
- co = 0;
- } while (co != ho);
-
- NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
- chan->id, handle);
-out:
- spin_unlock_irqrestore(&chan->ramht->lock, flags);
-}
-
-int
-nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
-{
- struct nouveau_ramht_entry *entry;
-
- entry = nouveau_ramht_remove_entry(chan, handle);
- if (!entry)
- return -ENOENT;
-
- nouveau_ramht_remove_hash(chan, entry->handle);
- nouveau_gpuobj_ref(NULL, &entry->gpuobj);
- kfree(entry);
- return 0;
-}
-
-struct nouveau_gpuobj *
-nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
-{
- struct nouveau_ramht *ramht = chan->ramht;
- struct nouveau_ramht_entry *entry;
- struct nouveau_gpuobj *gpuobj = NULL;
- unsigned long flags;
-
- if (unlikely(!chan->ramht))
- return NULL;
-
- spin_lock_irqsave(&ramht->lock, flags);
- list_for_each_entry(entry, &chan->ramht->entries, head) {
- if (entry->channel == chan && entry->handle == handle) {
- gpuobj = entry->gpuobj;
- break;
- }
- }
- spin_unlock_irqrestore(&ramht->lock, flags);
-
- return gpuobj;
-}
-
-int
-nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
- struct nouveau_ramht **pramht)
-{
- struct nouveau_ramht *ramht;
-
- ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
- if (!ramht)
- return -ENOMEM;
-
- ramht->dev = dev;
- kref_init(&ramht->refcount);
- ramht->bits = drm_order(gpuobj->size / 8);
- INIT_LIST_HEAD(&ramht->entries);
- spin_lock_init(&ramht->lock);
- nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
-
- *pramht = ramht;
- return 0;
-}
-
-static void
-nouveau_ramht_del(struct kref *ref)
-{
- struct nouveau_ramht *ramht =
- container_of(ref, struct nouveau_ramht, refcount);
-
- nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
- kfree(ramht);
-}
-
-void
-nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
- struct nouveau_channel *chan)
-{
- struct nouveau_ramht_entry *entry;
- struct nouveau_ramht *ramht;
-
- if (ref)
- kref_get(&ref->refcount);
-
- ramht = *ptr;
- if (ramht) {
- while ((entry = nouveau_ramht_remove_entry(chan, 0))) {
- nouveau_ramht_remove_hash(chan, entry->handle);
- nouveau_gpuobj_ref(NULL, &entry->gpuobj);
- kfree(entry);
- }
-
- kref_put(&ramht->refcount, nouveau_ramht_del);
- }
- *ptr = ref;
-}
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_RAMHT_H__
-#define __NOUVEAU_RAMHT_H__
-
-struct nouveau_ramht_entry {
- struct list_head head;
- struct nouveau_channel *channel;
- struct nouveau_gpuobj *gpuobj;
- u32 handle;
-};
-
-struct nouveau_ramht {
- struct drm_device *dev;
- struct kref refcount;
- spinlock_t lock;
- struct nouveau_gpuobj *gpuobj;
- struct list_head entries;
- int bits;
-};
-
-extern int nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
- struct nouveau_ramht **);
-extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
- struct nouveau_channel *unref_channel);
-
-extern int nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
- struct nouveau_gpuobj *);
-extern int nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
-extern struct nouveau_gpuobj *
-nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
-
-#endif
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_fbcon.h"
-#include "nouveau_ramht.h"
-#include "nouveau_gpio.h"
+#include <core/ramht.h>
+#include <subdev/gpio.h>
#include "nouveau_pm.h"
#include "nv50_display.h"
-#include "nouveau_fifo.h"
+#include <engine/fifo.h>
#include "nouveau_fence.h"
#include "nouveau_software.h"
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_vm.h"
-
-void
-nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
-{
- struct nouveau_vm *vm = vma->vm;
- struct nouveau_mm_node *r;
- int big = vma->node->type != vm->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (vm->pgt_bits - bits);
- u32 end, len;
-
- delta = 0;
- list_for_each_entry(r, &node->regions, rl_entry) {
- u64 phys = (u64)r->offset << 12;
- u32 num = r->length >> bits;
-
- while (num) {
- struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- vm->map(vma, pgt, node, pte, len, phys, delta);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- phys += len << (bits + 12);
- pde++;
- pte = 0;
- }
-
- delta += (u64)len << vma->node->type;
- }
- }
-
- vm->flush(vm);
-}
-
-void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
-{
- nouveau_vm_map_at(vma, 0, node);
-}
-
-void
-nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem)
-{
- struct nouveau_vm *vm = vma->vm;
- int big = vma->node->type != vm->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (vm->pgt_bits - bits);
- unsigned m, sglen;
- u32 end, len;
- int i;
- struct scatterlist *sg;
-
- for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
- struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
- sglen = sg_dma_len(sg) >> PAGE_SHIFT;
-
- end = pte + sglen;
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- for (m = 0; m < len; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- vm->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
-
- if (num == 0)
- goto finish;
- }
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- if (m < sglen) {
- for (; m < sglen; m++) {
- dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
-
- vm->map_sg(vma, pgt, mem, pte, 1, &addr);
- num--;
- pte++;
- if (num == 0)
- goto finish;
- }
- }
-
- }
-finish:
- vm->flush(vm);
-}
-
-void
-nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem)
-{
- struct nouveau_vm *vm = vma->vm;
- dma_addr_t *list = mem->pages;
- int big = vma->node->type != vm->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (vm->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- vm->map_sg(vma, pgt, mem, pte, len, list);
-
- num -= len;
- pte += len;
- list += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- }
-
- vm->flush(vm);
-}
-
-void
-nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
-{
- struct nouveau_vm *vm = vma->vm;
- int big = vma->node->type != vm->spg_shift;
- u32 offset = vma->node->offset + (delta >> 12);
- u32 bits = vma->node->type - 12;
- u32 num = length >> vma->node->type;
- u32 pde = (offset >> vm->pgt_bits) - vm->fpde;
- u32 pte = (offset & ((1 << vm->pgt_bits) - 1)) >> bits;
- u32 max = 1 << (vm->pgt_bits - bits);
- u32 end, len;
-
- while (num) {
- struct nouveau_gpuobj *pgt = vm->pgt[pde].obj[big];
-
- end = (pte + num);
- if (unlikely(end >= max))
- end = max;
- len = end - pte;
-
- vm->unmap(pgt, pte, len);
-
- num -= len;
- pte += len;
- if (unlikely(end >= max)) {
- pde++;
- pte = 0;
- }
- }
-
- vm->flush(vm);
-}
-
-void
-nouveau_vm_unmap(struct nouveau_vma *vma)
-{
- nouveau_vm_unmap_at(vma, 0, (u64)vma->node->length << 12);
-}
-
-static void
-nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
-{
- struct nouveau_vm_pgd *vpgd;
- struct nouveau_vm_pgt *vpgt;
- struct nouveau_gpuobj *pgt;
- u32 pde;
-
- for (pde = fpde; pde <= lpde; pde++) {
- vpgt = &vm->pgt[pde - vm->fpde];
- if (--vpgt->refcount[big])
- continue;
-
- pgt = vpgt->obj[big];
- vpgt->obj[big] = NULL;
-
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- vm->map_pgt(vpgd->obj, pde, vpgt->obj);
- }
-
- mutex_unlock(&vm->mm.mutex);
- nouveau_gpuobj_ref(NULL, &pgt);
- mutex_lock(&vm->mm.mutex);
- }
-}
-
-static int
-nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
-{
- struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- struct nouveau_vm_pgd *vpgd;
- struct nouveau_gpuobj *pgt;
- int big = (type != vm->spg_shift);
- u32 pgt_size;
- int ret;
-
- pgt_size = (1 << (vm->pgt_bits + 12)) >> type;
- pgt_size *= 8;
-
- mutex_unlock(&vm->mm.mutex);
- ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &pgt);
- mutex_lock(&vm->mm.mutex);
- if (unlikely(ret))
- return ret;
-
- /* someone beat us to filling the PDE while we didn't have the lock */
- if (unlikely(vpgt->refcount[big]++)) {
- mutex_unlock(&vm->mm.mutex);
- nouveau_gpuobj_ref(NULL, &pgt);
- mutex_lock(&vm->mm.mutex);
- return 0;
- }
-
- vpgt->obj[big] = pgt;
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- vm->map_pgt(vpgd->obj, pde, vpgt->obj);
- }
-
- return 0;
-}
-
-int
-nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
- u32 access, struct nouveau_vma *vma)
-{
- u32 align = (1 << page_shift) >> 12;
- u32 msize = size >> 12;
- u32 fpde, lpde, pde;
- int ret;
-
- mutex_lock(&vm->mm.mutex);
- ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
- if (unlikely(ret != 0)) {
- mutex_unlock(&vm->mm.mutex);
- return ret;
- }
-
- fpde = (vma->node->offset >> vm->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
- for (pde = fpde; pde <= lpde; pde++) {
- struct nouveau_vm_pgt *vpgt = &vm->pgt[pde - vm->fpde];
- int big = (vma->node->type != vm->spg_shift);
-
- if (likely(vpgt->refcount[big])) {
- vpgt->refcount[big]++;
- continue;
- }
-
- ret = nouveau_vm_map_pgt(vm, pde, vma->node->type);
- if (ret) {
- if (pde != fpde)
- nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
- nouveau_mm_put(&vm->mm, vma->node);
- mutex_unlock(&vm->mm.mutex);
- vma->node = NULL;
- return ret;
- }
- }
- mutex_unlock(&vm->mm.mutex);
-
- vma->vm = vm;
- vma->offset = (u64)vma->node->offset << 12;
- vma->access = access;
- return 0;
-}
-
-void
-nouveau_vm_put(struct nouveau_vma *vma)
-{
- struct nouveau_vm *vm = vma->vm;
- u32 fpde, lpde;
-
- if (unlikely(vma->node == NULL))
- return;
- fpde = (vma->node->offset >> vm->pgt_bits);
- lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
-
- mutex_lock(&vm->mm.mutex);
- nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
- nouveau_mm_put(&vm->mm, vma->node);
- vma->node = NULL;
- mutex_unlock(&vm->mm.mutex);
-}
-
-int
-nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
- struct nouveau_vm **pvm)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_vm *vm;
- u64 mm_length = (offset + length) - mm_offset;
- u32 block, pgt_bits;
- int ret;
-
- vm = kzalloc(sizeof(*vm), GFP_KERNEL);
- if (!vm)
- return -ENOMEM;
-
- if (dev_priv->card_type == NV_50) {
- vm->map_pgt = nv50_vm_map_pgt;
- vm->map = nv50_vm_map;
- vm->map_sg = nv50_vm_map_sg;
- vm->unmap = nv50_vm_unmap;
- vm->flush = nv50_vm_flush;
- vm->spg_shift = 12;
- vm->lpg_shift = 16;
-
- pgt_bits = 29;
- block = (1 << pgt_bits);
- if (length < block)
- block = length;
-
- } else
- if (dev_priv->card_type >= NV_C0) {
- vm->map_pgt = nvc0_vm_map_pgt;
- vm->map = nvc0_vm_map;
- vm->map_sg = nvc0_vm_map_sg;
- vm->unmap = nvc0_vm_unmap;
- vm->flush = nvc0_vm_flush;
- vm->spg_shift = 12;
- vm->lpg_shift = 17;
- pgt_bits = 27;
- block = 4096;
- } else {
- kfree(vm);
- return -ENOSYS;
- }
-
- vm->fpde = offset >> pgt_bits;
- vm->lpde = (offset + length - 1) >> pgt_bits;
- vm->pgt = kcalloc(vm->lpde - vm->fpde + 1, sizeof(*vm->pgt), GFP_KERNEL);
- if (!vm->pgt) {
- kfree(vm);
- return -ENOMEM;
- }
-
- INIT_LIST_HEAD(&vm->pgd_list);
- vm->dev = dev;
- vm->refcount = 1;
- vm->pgt_bits = pgt_bits - 12;
-
- ret = nouveau_mm_init(&vm->mm, mm_offset >> 12, mm_length >> 12,
- block >> 12);
- if (ret) {
- kfree(vm);
- return ret;
- }
-
- *pvm = vm;
- return 0;
-}
-
-static int
-nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
-{
- struct nouveau_vm_pgd *vpgd;
- int i;
-
- if (!pgd)
- return 0;
-
- vpgd = kzalloc(sizeof(*vpgd), GFP_KERNEL);
- if (!vpgd)
- return -ENOMEM;
-
- nouveau_gpuobj_ref(pgd, &vpgd->obj);
-
- mutex_lock(&vm->mm.mutex);
- for (i = vm->fpde; i <= vm->lpde; i++)
- vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
- list_add(&vpgd->head, &vm->pgd_list);
- mutex_unlock(&vm->mm.mutex);
- return 0;
-}
-
-static void
-nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
-{
- struct nouveau_vm_pgd *vpgd, *tmp;
- struct nouveau_gpuobj *pgd = NULL;
-
- if (!mpgd)
- return;
-
- mutex_lock(&vm->mm.mutex);
- list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- if (vpgd->obj == mpgd) {
- pgd = vpgd->obj;
- list_del(&vpgd->head);
- kfree(vpgd);
- break;
- }
- }
- mutex_unlock(&vm->mm.mutex);
-
- nouveau_gpuobj_ref(NULL, &pgd);
-}
-
-static void
-nouveau_vm_del(struct nouveau_vm *vm)
-{
- struct nouveau_vm_pgd *vpgd, *tmp;
-
- list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
- nouveau_vm_unlink(vm, vpgd->obj);
- }
-
- nouveau_mm_fini(&vm->mm);
- kfree(vm->pgt);
- kfree(vm);
-}
-
-int
-nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr,
- struct nouveau_gpuobj *pgd)
-{
- struct nouveau_vm *vm;
- int ret;
-
- vm = ref;
- if (vm) {
- ret = nouveau_vm_link(vm, pgd);
- if (ret)
- return ret;
-
- vm->refcount++;
- }
-
- vm = *ptr;
- *ptr = ref;
-
- if (vm) {
- nouveau_vm_unlink(vm, pgd);
-
- if (--vm->refcount == 0)
- nouveau_vm_del(vm);
- }
-
- return 0;
-}
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NOUVEAU_VM_H__
-#define __NOUVEAU_VM_H__
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-
-struct nouveau_vm_pgt {
- struct nouveau_gpuobj *obj[2];
- u32 refcount[2];
-};
-
-struct nouveau_vm_pgd {
- struct list_head head;
- struct nouveau_gpuobj *obj;
-};
-
-struct nouveau_vma {
- struct list_head head;
- int refcount;
- struct nouveau_vm *vm;
- struct nouveau_mm_node *node;
- u64 offset;
- u32 access;
-};
-
-struct nouveau_vm {
- struct drm_device *dev;
- struct nouveau_mm mm;
- int refcount;
-
- struct list_head pgd_list;
- atomic_t engref[16];
-
- struct nouveau_vm_pgt *pgt;
- u32 fpde;
- u32 lpde;
-
- u32 pgt_bits;
- u8 spg_shift;
- u8 lpg_shift;
-
- void (*map_pgt)(struct nouveau_gpuobj *pgd, u32 pde,
- struct nouveau_gpuobj *pgt[2]);
- void (*map)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt,
- u64 phys, u64 delta);
- void (*map_sg)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
-
- void (*map_sg_table)(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
- void (*unmap)(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt);
- void (*flush)(struct nouveau_vm *);
-};
-
-/* nouveau_vm.c */
-int nouveau_vm_new(struct drm_device *, u64 offset, u64 length, u64 mm_offset,
- struct nouveau_vm **);
-int nouveau_vm_ref(struct nouveau_vm *, struct nouveau_vm **,
- struct nouveau_gpuobj *pgd);
-int nouveau_vm_get(struct nouveau_vm *, u64 size, u32 page_shift,
- u32 access, struct nouveau_vma *);
-void nouveau_vm_put(struct nouveau_vma *);
-void nouveau_vm_map(struct nouveau_vma *, struct nouveau_mem *);
-void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
-void nouveau_vm_unmap(struct nouveau_vma *);
-void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
-void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- struct nouveau_mem *);
-void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem);
-/* nv50_vm.c */
-void nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
- struct nouveau_gpuobj *pgt[2]);
-void nv50_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
-void nv50_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
-void nv50_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
-void nv50_vm_flush(struct nouveau_vm *);
-void nv50_vm_flush_engine(struct drm_device *, int engine);
-
-/* nvc0_vm.c */
-void nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
- struct nouveau_gpuobj *pgt[2]);
-void nvc0_vm_map(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, u64 phys, u64 delta);
-void nvc0_vm_map_sg(struct nouveau_vma *, struct nouveau_gpuobj *,
- struct nouveau_mem *, u32 pte, u32 cnt, dma_addr_t *);
-void nvc0_vm_unmap(struct nouveau_gpuobj *, u32 pte, u32 cnt);
-void nvc0_vm_flush(struct nouveau_vm *);
-
-#endif
#include "nouveau_drv.h"
#include "nouveau_pm.h"
-#include "nouveau_gpio.h"
+#include <subdev/gpio.h>
static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
#include "nouveau_hw.h"
-#include "nouveau_gpio.h"
+#include <subdev/gpio.h>
#include "nvreg.h"
int nv04_dac_output_offset(struct drm_encoder *encoder)
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-int
-nv04_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 boot0 = nv_rd32(dev, NV04_PFB_BOOT_0);
-
- if (boot0 & 0x00000100) {
- dev_priv->vram_size = ((boot0 >> 12) & 0xf) * 2 + 2;
- dev_priv->vram_size *= 1024 * 1024;
- } else {
- switch (boot0 & NV04_PFB_BOOT_0_RAM_AMOUNT) {
- case NV04_PFB_BOOT_0_RAM_AMOUNT_32MB:
- dev_priv->vram_size = 32 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_16MB:
- dev_priv->vram_size = 16 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_8MB:
- dev_priv->vram_size = 8 * 1024 * 1024;
- break;
- case NV04_PFB_BOOT_0_RAM_AMOUNT_4MB:
- dev_priv->vram_size = 4 * 1024 * 1024;
- break;
- }
- }
-
- if ((boot0 & 0x00000038) <= 0x10)
- dev_priv->vram_type = NV_MEM_TYPE_SGRAM;
- else
- dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
-
- return 0;
-}
-
-int
-nv04_fb_init(struct drm_device *dev)
-{
- /* This is what the DDX did for NV_ARCH_04, but an mmio trace shows
- * nvidia reading PFB_CFG_0, then writing back its original value
- * (which was 0x701114 in this case).
- */
-
- nv_wr32(dev, NV04_PFB_CFG0, 0x1114);
- return 0;
-}
-
-void
-nv04_fb_takedown(struct drm_device *dev)
-{
-}
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_fbcon.h"
int
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_fence.h"
struct nv04_fence_chan {
+++ /dev/null
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-#include "nouveau_software.h"
-
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv04_ramfc[] = {
- { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
- { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
- { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
- { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
- { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
- { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
- { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
- { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
- {}
-};
-
-struct nv04_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
-};
-
-struct nv04_fifo_chan {
- struct nouveau_fifo_chan base;
- struct nouveau_gpuobj *ramfc;
-};
-
-bool
-nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
-{
- int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
-
- if (!enable) {
- /* In some cases the PFIFO puller may be left in an
- * inconsistent state if you try to stop it when it's
- * busy translating handles. Sometimes you get a
- * PFIFO_CACHE_ERROR, sometimes it just fails silently
- * sending incorrect instance offsets to PGRAPH after
- * it's started up again. To avoid the latter we
- * invalidate the most recently calculated instance.
- */
- if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
- NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
- NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
-
- if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
- NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
- }
-
- return pull & 1;
-}
-
-static int
-nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
- struct nv04_fifo_chan *fctx;
- unsigned long flags;
- int ret;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* initialise default fifo context */
- ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
- chan->id * 32, ~0, 32,
- NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
- if (ret)
- goto error;
-
- nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
- nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
- nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
- nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
- nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
- nv_wo32(fctx->ramfc, 0x14, 0x00000000);
- nv_wo32(fctx->ramfc, 0x18, 0x00000000);
- nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
-
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-void
-nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
- struct nv04_fifo_chan *fctx = chan->engctx[engine];
- struct ramfc_desc *c = priv->ramfc_desc;
- unsigned long flags;
- int chid;
-
- /* prevent fifo context switches */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
- /* if this channel is active, replace it with a null context */
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
- if (chid == chan->id) {
- nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
- nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
-
- do {
- u32 mask = ((1ULL << c->bits) - 1) << c->regs;
- nv_mask(dev, c->regp, mask, 0x00000000);
- } while ((++c)->bits);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- }
-
- /* restore normal operation, after disabling dma mode */
- nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* clean up */
- nouveau_gpuobj_ref(NULL, &fctx->ramfc);
- nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
-}
-
-int
-nv04_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
- int i;
-
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->pinst >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
- nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
-
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-
- for (i = 0; i < priv->base.channels; i++) {
- if (dev_priv->channels.ptr[i])
- nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
- }
-
- return 0;
-}
-
-int
-nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
- struct nouveau_channel *chan;
- int chid;
-
- /* prevent context switches and halt fifo operation */
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
-
- /* store current fifo context in ramfc */
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
- chan = dev_priv->channels.ptr[chid];
- if (suspend && chid != priv->base.channels && chan) {
- struct nv04_fifo_chan *fctx = chan->engctx[engine];
- struct nouveau_gpuobj *ctx = fctx->ramfc;
- struct ramfc_desc *c = priv->ramfc_desc;
- do {
- u32 rm = ((1ULL << c->bits) - 1) << c->regs;
- u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
- u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
- u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
- nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
- } while ((++c)->bits);
- }
-
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
- return 0;
-}
-
-static bool
-nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
- struct nouveau_gpuobj *obj;
- unsigned long flags;
- const int subc = (addr >> 13) & 0x7;
- const int mthd = addr & 0x1ffc;
- bool handled = false;
- u32 engine;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < pfifo->channels))
- chan = dev_priv->channels.ptr[chid];
- if (unlikely(!chan))
- goto out;
-
- switch (mthd) {
- case 0x0000: /* bind object to subchannel */
- obj = nouveau_ramht_find(chan, data);
- if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
- break;
-
- engine = 0x0000000f << (subc * 4);
-
- nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
- handled = true;
- break;
- default:
- engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
- if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
- break;
-
- if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
- mthd, data))
- handled = true;
- break;
- }
-
-out:
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return handled;
-}
-
-static const char *nv_dma_state_err(u32 state)
-{
- static const char * const desc[] = {
- "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
- "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
- };
- return desc[(state >> 29) & 0x7];
-}
-
-void
-nv04_fifo_isr(struct drm_device *dev)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t status, reassign;
- int cnt = 0;
-
- reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
- while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
- uint32_t chid, get;
-
- nv_wr32(dev, NV03_PFIFO_CACHES, 0);
-
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
- get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
-
- if (status & NV_PFIFO_INTR_CACHE_ERROR) {
- uint32_t mthd, data;
- int ptr;
-
- /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
- * wrapping on my G80 chips, but CACHE1 isn't big
- * enough for this much data.. Tests show that it
- * wraps around to the start at GET=0x800.. No clue
- * as to why..
- */
- ptr = (get & 0x7ff) >> 2;
-
- if (dev_priv->card_type < NV_40) {
- mthd = nv_rd32(dev,
- NV04_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV04_PFIFO_CACHE1_DATA(ptr));
- } else {
- mthd = nv_rd32(dev,
- NV40_PFIFO_CACHE1_METHOD(ptr));
- data = nv_rd32(dev,
- NV40_PFIFO_CACHE1_DATA(ptr));
- }
-
- if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
- NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
- "Mthd 0x%04x Data 0x%08x\n",
- chid, (mthd >> 13) & 7, mthd & 0x1ffc,
- data);
- }
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_CACHE_ERROR);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
- nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
-
- nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
- nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
-
- status &= ~NV_PFIFO_INTR_CACHE_ERROR;
- }
-
- if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- u32 dma_get = nv_rd32(dev, 0x003244);
- u32 dma_put = nv_rd32(dev, 0x003240);
- u32 push = nv_rd32(dev, 0x003220);
- u32 state = nv_rd32(dev, 0x003228);
-
- if (dev_priv->card_type == NV_50) {
- u32 ho_get = nv_rd32(dev, 0x003328);
- u32 ho_put = nv_rd32(dev, 0x003320);
- u32 ib_get = nv_rd32(dev, 0x003334);
- u32 ib_put = nv_rd32(dev, 0x003330);
-
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
- "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
- "State 0x%08x (err: %s) Push 0x%08x\n",
- chid, ho_get, dma_get, ho_put,
- dma_put, ib_get, ib_put, state,
- nv_dma_state_err(state),
- push);
-
- /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
- nv_wr32(dev, 0x003364, 0x00000000);
- if (dma_get != dma_put || ho_get != ho_put) {
- nv_wr32(dev, 0x003244, dma_put);
- nv_wr32(dev, 0x003328, ho_put);
- } else
- if (ib_get != ib_put) {
- nv_wr32(dev, 0x003334, ib_put);
- }
- } else {
- NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
- "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
- chid, dma_get, dma_put, state,
- nv_dma_state_err(state), push);
-
- if (dma_get != dma_put)
- nv_wr32(dev, 0x003244, dma_put);
- }
-
- nv_wr32(dev, 0x003228, 0x00000000);
- nv_wr32(dev, 0x003220, 0x00000001);
- nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
- status &= ~NV_PFIFO_INTR_DMA_PUSHER;
- }
-
- if (status & NV_PFIFO_INTR_SEMAPHORE) {
- uint32_t sem;
-
- status &= ~NV_PFIFO_INTR_SEMAPHORE;
- nv_wr32(dev, NV03_PFIFO_INTR_0,
- NV_PFIFO_INTR_SEMAPHORE);
-
- sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
- nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- }
-
- if (dev_priv->card_type == NV_50) {
- if (status & 0x00000010) {
- nv50_fb_vm_trap(dev, nouveau_ratelimit());
- status &= ~0x00000010;
- nv_wr32(dev, 0x002100, 0x00000010);
- }
- }
-
- if (status) {
- if (nouveau_ratelimit())
- NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
- status, chid);
- nv_wr32(dev, NV03_PFIFO_INTR_0, status);
- status = 0;
- }
-
- nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
- }
-
- if (status) {
- NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
- nv_wr32(dev, 0x2140, 0);
- nv_wr32(dev, 0x140, 0);
- }
-
- nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
-}
-
-void
-nv04_fifo_destroy(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 8);
-
- dev_priv->eng[engine] = NULL;
- kfree(priv);
-}
-
-int
-nv04_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv04_fifo_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv04_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv04_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 15;
- priv->ramfc_desc = nv04_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- return 0;
-}
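
The ramfc_desc[] table in the deleted nv04_fifo.c drives both the null-context clear in nv04_fifo_context_del() and the context save in nv04_fifo_fini(): each entry gives a field width, a shift within a PFIFO register, and a shift plus byte offset within the channel's RAMFC image. The sketch below replays that save step against simulated storage so the mask/shift/merge logic can be followed in isolation; the mmio[] contents, the register indices in the table and the function names are invented for illustration, only the packing logic mirrors the driver.

```c
/* Stand-alone replay of the descriptor-driven RAMFC save from the removed
 * nv04_fifo_fini().  Each descriptor copies a <bits>-wide field out of a
 * (simulated) PFIFO register into a RAMFC context word at byte offset
 * ctxp, shifted into place by ctxs. */
#include <stdint.h>
#include <stdio.h>

struct ramfc_desc {
	unsigned bits:6;	/* field width				*/
	unsigned ctxs:5;	/* shift within the context word	*/
	unsigned ctxp:8;	/* byte offset within RAMFC		*/
	unsigned regs:5;	/* shift within the register		*/
	unsigned regp;		/* index into the simulated mmio[]	*/
};

static const struct ramfc_desc desc[] = {
	{ 32,  0, 0x00, 0, 0 },	/* think DMA_PUT	*/
	{ 32,  0, 0x04, 0, 1 },	/* think DMA_GET	*/
	{ 16,  0, 0x08, 0, 2 },	/* think DMA_INSTANCE	*/
	{ 16, 16, 0x08, 0, 3 },	/* think DMA_DCOUNT	*/
	{}
};

static uint32_t mmio[4] = { 0x1000, 0x0ffc, 0xbeef, 0x0042 };	/* fake registers */
static uint32_t ramfc[8];					/* fake RAMFC image */

static void save_context(void)
{
	const struct ramfc_desc *c = desc;

	do {
		uint32_t rm = ((1ULL << c->bits) - 1) << c->regs;
		uint32_t cm = ((1ULL << c->bits) - 1) << c->ctxs;
		uint32_t rv = (mmio[c->regp] & rm) >> c->regs;
		uint32_t cv = ramfc[c->ctxp / 4] & ~cm;

		ramfc[c->ctxp / 4] = cv | (rv << c->ctxs);
	} while ((++c)->bits);
}

int main(void)
{
	save_context();
	/* word 0x08 now holds DMA_INSTANCE in bits 0..15, DMA_DCOUNT in 16..31 */
	printf("ramfc[0x08] = 0x%08x\n", ramfc[2]);	/* -> 0x0042beef */
	return 0;
}
```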
+++ /dev/null
-/*
- * Copyright 2007 Stephane Marchesin
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_hw.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-
-struct nv04_graph_engine {
- struct nouveau_exec_engine base;
-};
-
-static uint32_t nv04_graph_ctx_regs[] = {
- 0x0040053c,
- 0x00400544,
- 0x00400540,
- 0x00400548,
- NV04_PGRAPH_CTX_SWITCH1,
- NV04_PGRAPH_CTX_SWITCH2,
- NV04_PGRAPH_CTX_SWITCH3,
- NV04_PGRAPH_CTX_SWITCH4,
- NV04_PGRAPH_CTX_CACHE1,
- NV04_PGRAPH_CTX_CACHE2,
- NV04_PGRAPH_CTX_CACHE3,
- NV04_PGRAPH_CTX_CACHE4,
- 0x00400184,
- 0x004001a4,
- 0x004001c4,
- 0x004001e4,
- 0x00400188,
- 0x004001a8,
- 0x004001c8,
- 0x004001e8,
- 0x0040018c,
- 0x004001ac,
- 0x004001cc,
- 0x004001ec,
- 0x00400190,
- 0x004001b0,
- 0x004001d0,
- 0x004001f0,
- 0x00400194,
- 0x004001b4,
- 0x004001d4,
- 0x004001f4,
- 0x00400198,
- 0x004001b8,
- 0x004001d8,
- 0x004001f8,
- 0x0040019c,
- 0x004001bc,
- 0x004001dc,
- 0x004001fc,
- 0x00400174,
- NV04_PGRAPH_DMA_START_0,
- NV04_PGRAPH_DMA_START_1,
- NV04_PGRAPH_DMA_LENGTH,
- NV04_PGRAPH_DMA_MISC,
- NV04_PGRAPH_DMA_PITCH,
- NV04_PGRAPH_BOFFSET0,
- NV04_PGRAPH_BBASE0,
- NV04_PGRAPH_BLIMIT0,
- NV04_PGRAPH_BOFFSET1,
- NV04_PGRAPH_BBASE1,
- NV04_PGRAPH_BLIMIT1,
- NV04_PGRAPH_BOFFSET2,
- NV04_PGRAPH_BBASE2,
- NV04_PGRAPH_BLIMIT2,
- NV04_PGRAPH_BOFFSET3,
- NV04_PGRAPH_BBASE3,
- NV04_PGRAPH_BLIMIT3,
- NV04_PGRAPH_BOFFSET4,
- NV04_PGRAPH_BBASE4,
- NV04_PGRAPH_BLIMIT4,
- NV04_PGRAPH_BOFFSET5,
- NV04_PGRAPH_BBASE5,
- NV04_PGRAPH_BLIMIT5,
- NV04_PGRAPH_BPITCH0,
- NV04_PGRAPH_BPITCH1,
- NV04_PGRAPH_BPITCH2,
- NV04_PGRAPH_BPITCH3,
- NV04_PGRAPH_BPITCH4,
- NV04_PGRAPH_SURFACE,
- NV04_PGRAPH_STATE,
- NV04_PGRAPH_BSWIZZLE2,
- NV04_PGRAPH_BSWIZZLE5,
- NV04_PGRAPH_BPIXEL,
- NV04_PGRAPH_NOTIFY,
- NV04_PGRAPH_PATT_COLOR0,
- NV04_PGRAPH_PATT_COLOR1,
- NV04_PGRAPH_PATT_COLORRAM+0x00,
- NV04_PGRAPH_PATT_COLORRAM+0x04,
- NV04_PGRAPH_PATT_COLORRAM+0x08,
- NV04_PGRAPH_PATT_COLORRAM+0x0c,
- NV04_PGRAPH_PATT_COLORRAM+0x10,
- NV04_PGRAPH_PATT_COLORRAM+0x14,
- NV04_PGRAPH_PATT_COLORRAM+0x18,
- NV04_PGRAPH_PATT_COLORRAM+0x1c,
- NV04_PGRAPH_PATT_COLORRAM+0x20,
- NV04_PGRAPH_PATT_COLORRAM+0x24,
- NV04_PGRAPH_PATT_COLORRAM+0x28,
- NV04_PGRAPH_PATT_COLORRAM+0x2c,
- NV04_PGRAPH_PATT_COLORRAM+0x30,
- NV04_PGRAPH_PATT_COLORRAM+0x34,
- NV04_PGRAPH_PATT_COLORRAM+0x38,
- NV04_PGRAPH_PATT_COLORRAM+0x3c,
- NV04_PGRAPH_PATT_COLORRAM+0x40,
- NV04_PGRAPH_PATT_COLORRAM+0x44,
- NV04_PGRAPH_PATT_COLORRAM+0x48,
- NV04_PGRAPH_PATT_COLORRAM+0x4c,
- NV04_PGRAPH_PATT_COLORRAM+0x50,
- NV04_PGRAPH_PATT_COLORRAM+0x54,
- NV04_PGRAPH_PATT_COLORRAM+0x58,
- NV04_PGRAPH_PATT_COLORRAM+0x5c,
- NV04_PGRAPH_PATT_COLORRAM+0x60,
- NV04_PGRAPH_PATT_COLORRAM+0x64,
- NV04_PGRAPH_PATT_COLORRAM+0x68,
- NV04_PGRAPH_PATT_COLORRAM+0x6c,
- NV04_PGRAPH_PATT_COLORRAM+0x70,
- NV04_PGRAPH_PATT_COLORRAM+0x74,
- NV04_PGRAPH_PATT_COLORRAM+0x78,
- NV04_PGRAPH_PATT_COLORRAM+0x7c,
- NV04_PGRAPH_PATT_COLORRAM+0x80,
- NV04_PGRAPH_PATT_COLORRAM+0x84,
- NV04_PGRAPH_PATT_COLORRAM+0x88,
- NV04_PGRAPH_PATT_COLORRAM+0x8c,
- NV04_PGRAPH_PATT_COLORRAM+0x90,
- NV04_PGRAPH_PATT_COLORRAM+0x94,
- NV04_PGRAPH_PATT_COLORRAM+0x98,
- NV04_PGRAPH_PATT_COLORRAM+0x9c,
- NV04_PGRAPH_PATT_COLORRAM+0xa0,
- NV04_PGRAPH_PATT_COLORRAM+0xa4,
- NV04_PGRAPH_PATT_COLORRAM+0xa8,
- NV04_PGRAPH_PATT_COLORRAM+0xac,
- NV04_PGRAPH_PATT_COLORRAM+0xb0,
- NV04_PGRAPH_PATT_COLORRAM+0xb4,
- NV04_PGRAPH_PATT_COLORRAM+0xb8,
- NV04_PGRAPH_PATT_COLORRAM+0xbc,
- NV04_PGRAPH_PATT_COLORRAM+0xc0,
- NV04_PGRAPH_PATT_COLORRAM+0xc4,
- NV04_PGRAPH_PATT_COLORRAM+0xc8,
- NV04_PGRAPH_PATT_COLORRAM+0xcc,
- NV04_PGRAPH_PATT_COLORRAM+0xd0,
- NV04_PGRAPH_PATT_COLORRAM+0xd4,
- NV04_PGRAPH_PATT_COLORRAM+0xd8,
- NV04_PGRAPH_PATT_COLORRAM+0xdc,
- NV04_PGRAPH_PATT_COLORRAM+0xe0,
- NV04_PGRAPH_PATT_COLORRAM+0xe4,
- NV04_PGRAPH_PATT_COLORRAM+0xe8,
- NV04_PGRAPH_PATT_COLORRAM+0xec,
- NV04_PGRAPH_PATT_COLORRAM+0xf0,
- NV04_PGRAPH_PATT_COLORRAM+0xf4,
- NV04_PGRAPH_PATT_COLORRAM+0xf8,
- NV04_PGRAPH_PATT_COLORRAM+0xfc,
- NV04_PGRAPH_PATTERN,
- 0x0040080c,
- NV04_PGRAPH_PATTERN_SHAPE,
- 0x00400600,
- NV04_PGRAPH_ROP3,
- NV04_PGRAPH_CHROMA,
- NV04_PGRAPH_BETA_AND,
- NV04_PGRAPH_BETA_PREMULT,
- NV04_PGRAPH_CONTROL0,
- NV04_PGRAPH_CONTROL1,
- NV04_PGRAPH_CONTROL2,
- NV04_PGRAPH_BLEND,
- NV04_PGRAPH_STORED_FMT,
- NV04_PGRAPH_SOURCE_COLOR,
- 0x00400560,
- 0x00400568,
- 0x00400564,
- 0x0040056c,
- 0x00400400,
- 0x00400480,
- 0x00400404,
- 0x00400484,
- 0x00400408,
- 0x00400488,
- 0x0040040c,
- 0x0040048c,
- 0x00400410,
- 0x00400490,
- 0x00400414,
- 0x00400494,
- 0x00400418,
- 0x00400498,
- 0x0040041c,
- 0x0040049c,
- 0x00400420,
- 0x004004a0,
- 0x00400424,
- 0x004004a4,
- 0x00400428,
- 0x004004a8,
- 0x0040042c,
- 0x004004ac,
- 0x00400430,
- 0x004004b0,
- 0x00400434,
- 0x004004b4,
- 0x00400438,
- 0x004004b8,
- 0x0040043c,
- 0x004004bc,
- 0x00400440,
- 0x004004c0,
- 0x00400444,
- 0x004004c4,
- 0x00400448,
- 0x004004c8,
- 0x0040044c,
- 0x004004cc,
- 0x00400450,
- 0x004004d0,
- 0x00400454,
- 0x004004d4,
- 0x00400458,
- 0x004004d8,
- 0x0040045c,
- 0x004004dc,
- 0x00400460,
- 0x004004e0,
- 0x00400464,
- 0x004004e4,
- 0x00400468,
- 0x004004e8,
- 0x0040046c,
- 0x004004ec,
- 0x00400470,
- 0x004004f0,
- 0x00400474,
- 0x004004f4,
- 0x00400478,
- 0x004004f8,
- 0x0040047c,
- 0x004004fc,
- 0x00400534,
- 0x00400538,
- 0x00400514,
- 0x00400518,
- 0x0040051c,
- 0x00400520,
- 0x00400524,
- 0x00400528,
- 0x0040052c,
- 0x00400530,
- 0x00400d00,
- 0x00400d40,
- 0x00400d80,
- 0x00400d04,
- 0x00400d44,
- 0x00400d84,
- 0x00400d08,
- 0x00400d48,
- 0x00400d88,
- 0x00400d0c,
- 0x00400d4c,
- 0x00400d8c,
- 0x00400d10,
- 0x00400d50,
- 0x00400d90,
- 0x00400d14,
- 0x00400d54,
- 0x00400d94,
- 0x00400d18,
- 0x00400d58,
- 0x00400d98,
- 0x00400d1c,
- 0x00400d5c,
- 0x00400d9c,
- 0x00400d20,
- 0x00400d60,
- 0x00400da0,
- 0x00400d24,
- 0x00400d64,
- 0x00400da4,
- 0x00400d28,
- 0x00400d68,
- 0x00400da8,
- 0x00400d2c,
- 0x00400d6c,
- 0x00400dac,
- 0x00400d30,
- 0x00400d70,
- 0x00400db0,
- 0x00400d34,
- 0x00400d74,
- 0x00400db4,
- 0x00400d38,
- 0x00400d78,
- 0x00400db8,
- 0x00400d3c,
- 0x00400d7c,
- 0x00400dbc,
- 0x00400590,
- 0x00400594,
- 0x00400598,
- 0x0040059c,
- 0x004005a8,
- 0x004005ac,
- 0x004005b0,
- 0x004005b4,
- 0x004005c0,
- 0x004005c4,
- 0x004005c8,
- 0x004005cc,
- 0x004005d0,
- 0x004005d4,
- 0x004005d8,
- 0x004005dc,
- 0x004005e0,
- NV04_PGRAPH_PASSTHRU_0,
- NV04_PGRAPH_PASSTHRU_1,
- NV04_PGRAPH_PASSTHRU_2,
- NV04_PGRAPH_DVD_COLORFMT,
- NV04_PGRAPH_SCALED_FORMAT,
- NV04_PGRAPH_MISC24_0,
- NV04_PGRAPH_MISC24_1,
- NV04_PGRAPH_MISC24_2,
- 0x00400500,
- 0x00400504,
- NV04_PGRAPH_VALID1,
- NV04_PGRAPH_VALID2,
- NV04_PGRAPH_DEBUG_3
-};
-
-struct graph_state {
- uint32_t nv04[ARRAY_SIZE(nv04_graph_ctx_regs)];
-};
-
-static struct nouveau_channel *
-nv04_graph_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = 15;
-
- if (nv_rd32(dev, NV04_PGRAPH_CTX_CONTROL) & 0x00010000)
- chid = nv_rd32(dev, NV04_PGRAPH_CTX_USER) >> 24;
-
- if (chid > 15)
- return NULL;
-
- return dev_priv->channels.ptr[chid];
-}
-
-static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
- if (nv04_graph_ctx_regs[i] == reg)
- return &ctx->nv04[i];
- }
-
- return NULL;
-}
-
-static int
-nv04_graph_load_context(struct nouveau_channel *chan)
-{
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- uint32_t tmp;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
- nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);
-
- nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
-
- tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
-
- tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
- nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
-
- return 0;
-}
-
-static int
-nv04_graph_unload_context(struct drm_device *dev)
-{
- struct nouveau_channel *chan = NULL;
- struct graph_state *ctx;
- uint32_t tmp;
- int i;
-
- chan = nv04_graph_channel(dev);
- if (!chan)
- return 0;
- ctx = chan->engctx[NVOBJ_ENGINE_GR];
-
- for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++)
- ctx->nv04[i] = nv_rd32(dev, nv04_graph_ctx_regs[i]);
-
- nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10000000);
- tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 15 << 24;
- nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
- return 0;
-}
-
-static int
-nv04_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct graph_state *pgraph_ctx;
- NV_DEBUG(chan->dev, "nv04_graph_context_create %d\n", chan->id);
-
- pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
- if (pgraph_ctx == NULL)
- return -ENOMEM;
-
- *ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
-
- chan->engctx[engine] = pgraph_ctx;
- return 0;
-}
-
-static void
-nv04_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct graph_state *pgraph_ctx = chan->engctx[engine];
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
- /* Unload the context if it's the currently active one */
- if (nv04_graph_channel(dev) == chan)
- nv04_graph_unload_context(dev);
-
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the context resources */
- kfree(pgraph_ctx);
- chan->engctx[engine] = NULL;
-}
-
-int
-nv04_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
- if (ret)
- return ret;
- obj->engine = 1;
- obj->class = class;
-
-#ifdef __BIG_ENDIAN
- nv_wo32(obj, 0x00, 0x00080000 | class);
-#else
- nv_wo32(obj, 0x00, class);
-#endif
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static int
-nv04_graph_init(struct drm_device *dev, int engine)
-{
- uint32_t tmp;
-
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
- ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
- NV_PMC_ENABLE_PGRAPH);
-
- /* Enable PGRAPH interrupts */
- nv_wr32(dev, NV03_PGRAPH_INTR, 0xFFFFFFFF);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(dev, NV04_PGRAPH_VALID1, 0);
- nv_wr32(dev, NV04_PGRAPH_VALID2, 0);
- /*nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x000001FF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x001FFFFF);*/
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x1231c000);
- /*1231C000 blob, 001 haiku*/
- /*V_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);*/
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x72111100);
- /*0x72111100 blob , 01 haiku*/
- /*nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f870);*/
- nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x11d5f071);
- /*haiku same*/
-
- /*nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);
- /*haiku and blob 10d4*/
-
- nv_wr32(dev, NV04_PGRAPH_STATE , 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL , 0x10000100);
- tmp = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 15 << 24;
- nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);
-
- /* These don't belong here, they're part of a per-channel context */
- nv_wr32(dev, NV04_PGRAPH_PATTERN_SHAPE, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_BETA_AND , 0xFFFFFFFF);
-
- return 0;
-}
-
-static int
-nv04_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- return -EBUSY;
- }
- nv04_graph_unload_context(dev);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
- return 0;
-}
-
-/*
- * Software methods, why they are needed, and how they all work:
- *
- * NV04 and NV05 keep most of the state in PGRAPH context itself, but some
- * 2d engine settings are kept inside the grobjs themselves. The grobjs are
- * 3 words long on both. grobj format on NV04 is:
- *
- * word 0:
- * - bits 0-7: class
- * - bit 12: color key active
- * - bit 13: clip rect active
- * - bit 14: if set, destination surface is swizzled and taken from buffer 5
- * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
- * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
- * NV03_CONTEXT_SURFACE_DST].
- * - bits 15-17: 2d operation [aka patch config]
- * - bit 24: patch valid [enables rendering using this object]
- * - bit 25: surf3d valid [for tex_tri and multitex_tri only]
- * word 1:
- * - bits 0-1: mono format
- * - bits 8-13: color format
- * - bits 16-31: DMA_NOTIFY instance
- * word 2:
- * - bits 0-15: DMA_A instance
- * - bits 16-31: DMA_B instance
- *
- * On NV05 it's:
- *
- * word 0:
- * - bits 0-7: class
- * - bit 12: color key active
- * - bit 13: clip rect active
- * - bit 14: if set, destination surface is swizzled and taken from buffer 5
- * [set by NV04_SWIZZLED_SURFACE], otherwise it's linear and taken
- * from buffer 0 [set by NV04_CONTEXT_SURFACES_2D or
- * NV03_CONTEXT_SURFACE_DST].
- * - bits 15-17: 2d operation [aka patch config]
- * - bits 20-22: dither mode
- * - bit 24: patch valid [enables rendering using this object]
- * - bit 25: surface_dst/surface_color/surf2d/surf3d valid
- * - bit 26: surface_src/surface_zeta valid
- * - bit 27: pattern valid
- * - bit 28: rop valid
- * - bit 29: beta1 valid
- * - bit 30: beta4 valid
- * word 1:
- * - bits 0-1: mono format
- * - bits 8-13: color format
- * - bits 16-31: DMA_NOTIFY instance
- * word 2:
- * - bits 0-15: DMA_A instance
- * - bits 16-31: DMA_B instance
- *
- * NV05 will set/unset the relevant valid bits when you poke the relevant
- * object-binding methods with object of the proper type, or with the NULL
- * type. It'll only allow rendering using the grobj if all needed objects
- * are bound. The needed set of objects depends on selected operation: for
- * example rop object is needed by ROP_AND, but not by SRCCOPY_AND.
- *
- * NV04 doesn't have these methods implemented at all, and doesn't have the
- * relevant bits in grobj. Instead, it'll allow rendering whenever bit 24
- * is set. So we have to emulate them in software, internally keeping the
- * same bits as NV05 does. Since grobjs are aligned to 16 bytes on nv04,
- * but the last word isn't actually used for anything, we abuse it for this
- * purpose.
- *
- * Actually, NV05 can optionally check bit 24 too, but we disable this since
- * there's no use for it.
- *
- * For unknown reasons, NV04 implements surf3d binding in hardware as an
- * exception. Also for unknown reasons, NV04 doesn't implement the clipping
- * methods on the surf3d object, so we have to emulate them too.
- */
-
-static void
-nv04_graph_set_ctx1(struct nouveau_channel *chan, u32 mask, u32 value)
-{
- struct drm_device *dev = chan->dev;
- u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
- int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
- u32 tmp;
-
- tmp = nv_ri32(dev, instance);
- tmp &= ~mask;
- tmp |= value;
-
- nv_wi32(dev, instance, tmp);
- nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
- nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
-}
-
-static void
-nv04_graph_set_ctx_val(struct nouveau_channel *chan, u32 mask, u32 value)
-{
- struct drm_device *dev = chan->dev;
- u32 instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
- u32 tmp, ctx1;
- int class, op, valid = 1;
-
- ctx1 = nv_ri32(dev, instance);
- class = ctx1 & 0xff;
- op = (ctx1 >> 15) & 7;
- tmp = nv_ri32(dev, instance + 0xc);
- tmp &= ~mask;
- tmp |= value;
- nv_wi32(dev, instance + 0xc, tmp);
-
- /* check for valid surf2d/surf_dst/surf_color */
- if (!(tmp & 0x02000000))
- valid = 0;
- /* check for valid surf_src/surf_zeta */
- if ((class == 0x1f || class == 0x48) && !(tmp & 0x04000000))
- valid = 0;
-
- switch (op) {
- /* SRCCOPY_AND, SRCCOPY: no extra objects required */
- case 0:
- case 3:
- break;
- /* ROP_AND: requires pattern and rop */
- case 1:
- if (!(tmp & 0x18000000))
- valid = 0;
- break;
- /* BLEND_AND: requires beta1 */
- case 2:
- if (!(tmp & 0x20000000))
- valid = 0;
- break;
- /* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 required */
- case 4:
- case 5:
- if (!(tmp & 0x40000000))
- valid = 0;
- break;
- }
-
- nv04_graph_set_ctx1(chan, 0x01000000, valid << 24);
-}
-
-static int
-nv04_graph_mthd_set_operation(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- if (data > 5)
- return 1;
- /* Old versions of the objects only accept first three operations. */
- if (data > 2 && class < 0x40)
- return 1;
- nv04_graph_set_ctx1(chan, 0x00038000, data << 15);
- /* changing operation changes set of objects needed for validation */
- nv04_graph_set_ctx_val(chan, 0, 0);
- return 0;
-}
-
-static int
-nv04_graph_mthd_surf3d_clip_h(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- uint32_t min = data & 0xffff, max;
- uint32_t w = data >> 16;
- if (min & 0x8000)
- /* too large */
- return 1;
- if (w & 0x8000)
- /* yes, it accepts negative for some reason. */
- w |= 0xffff0000;
- max = min + w;
- max &= 0x3ffff;
- nv_wr32(chan->dev, 0x40053c, min);
- nv_wr32(chan->dev, 0x400544, max);
- return 0;
-}
-
-static int
-nv04_graph_mthd_surf3d_clip_v(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- uint32_t min = data & 0xffff, max;
- uint32_t w = data >> 16;
- if (min & 0x8000)
- /* too large */
- return 1;
- if (w & 0x8000)
- /* yes, it accepts negative for some reason. */
- w |= 0xffff0000;
- max = min + w;
- max &= 0x3ffff;
- nv_wr32(chan->dev, 0x400540, min);
- nv_wr32(chan->dev, 0x400548, max);
- return 0;
-}
-
-static int
-nv04_graph_mthd_bind_surf2d(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx1(chan, 0x00004000, 0);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0);
- return 0;
- case 0x42:
- nv04_graph_set_ctx1(chan, 0x00004000, 0);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf2d_swzsurf(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx1(chan, 0x00004000, 0);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0);
- return 0;
- case 0x42:
- nv04_graph_set_ctx1(chan, 0x00004000, 0);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
- return 0;
- case 0x52:
- nv04_graph_set_ctx1(chan, 0x00004000, 0x00004000);
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_nv01_patt(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx_val(chan, 0x08000000, 0);
- return 0;
- case 0x18:
- nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_nv04_patt(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx_val(chan, 0x08000000, 0);
- return 0;
- case 0x44:
- nv04_graph_set_ctx_val(chan, 0x08000000, 0x08000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_rop(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx_val(chan, 0x10000000, 0);
- return 0;
- case 0x43:
- nv04_graph_set_ctx_val(chan, 0x10000000, 0x10000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_beta1(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx_val(chan, 0x20000000, 0);
- return 0;
- case 0x12:
- nv04_graph_set_ctx_val(chan, 0x20000000, 0x20000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_beta4(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx_val(chan, 0x40000000, 0);
- return 0;
- case 0x72:
- nv04_graph_set_ctx_val(chan, 0x40000000, 0x40000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf_dst(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx_val(chan, 0x02000000, 0);
- return 0;
- case 0x58:
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf_src(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx_val(chan, 0x04000000, 0);
- return 0;
- case 0x59:
- nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf_color(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx_val(chan, 0x02000000, 0);
- return 0;
- case 0x5a:
- nv04_graph_set_ctx_val(chan, 0x02000000, 0x02000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_surf_zeta(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx_val(chan, 0x04000000, 0);
- return 0;
- case 0x5b:
- nv04_graph_set_ctx_val(chan, 0x04000000, 0x04000000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_clip(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx1(chan, 0x2000, 0);
- return 0;
- case 0x19:
- nv04_graph_set_ctx1(chan, 0x2000, 0x2000);
- return 0;
- }
- return 1;
-}
-
-static int
-nv04_graph_mthd_bind_chroma(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- switch (nv_ri32(chan->dev, data << 4) & 0xff) {
- case 0x30:
- nv04_graph_set_ctx1(chan, 0x1000, 0);
- return 0;
- /* Yes, for some reason even the old versions of objects
- * accept 0x57 and not 0x17. Consistency be damned.
- */
- case 0x57:
- nv04_graph_set_ctx1(chan, 0x1000, 0x1000);
- return 0;
- }
- return 1;
-}
-
-static struct nouveau_bitfield nv04_graph_intr[] = {
- { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
- {}
-};
-
-static struct nouveau_bitfield nv04_graph_nstatus[] = {
- { NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
- { NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
- { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
- { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
- {}
-};
-
-struct nouveau_bitfield nv04_graph_nsource[] = {
- { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
- { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
- { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
- { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
- { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
- { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
- { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
- { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
- { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
- { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
- { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
- { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
- { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
- { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
- {}
-};
-
-static void
-nv04_graph_context_switch(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
- int chid;
-
- nouveau_wait_for_idle(dev);
-
- /* If previous context is valid, we need to save it */
- nv04_graph_unload_context(dev);
-
- /* Load context for next channel */
- chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
- NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
- chan = dev_priv->channels.ptr[chid];
- if (chan)
- nv04_graph_load_context(chan);
-}
-
-static void
-nv04_graph_isr(struct drm_device *dev)
-{
- u32 stat;
-
- while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 chid = (addr & 0x0f000000) >> 24;
- u32 subc = (addr & 0x0000e000) >> 13;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400180 + subc * 4) & 0xff;
- u32 show = stat;
-
- if (stat & NV_PGRAPH_INTR_NOTIFY) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
- show &= ~NV_PGRAPH_INTR_NOTIFY;
- }
- }
-
- if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
- stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv04_graph_context_switch(dev);
- }
-
- nv_wr32(dev, NV03_PGRAPH_INTR, stat);
- nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv04_graph_intr, show);
- printk(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
- nouveau_bitfield_print(nv04_graph_nstatus, nstatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
- }
- }
-}
-
-static void
-nv04_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nv04_graph_engine *pgraph = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 12);
-
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(pgraph);
-}
-
-int
-nv04_graph_create(struct drm_device *dev)
-{
- struct nv04_graph_engine *pgraph;
-
- pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- pgraph->base.destroy = nv04_graph_destroy;
- pgraph->base.init = nv04_graph_init;
- pgraph->base.fini = nv04_graph_fini;
- pgraph->base.context_new = nv04_graph_context_new;
- pgraph->base.context_del = nv04_graph_context_del;
- pgraph->base.object_new = nv04_graph_object_new;
-
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- nouveau_irq_register(dev, 12, nv04_graph_isr);
-
- /* dvd subpicture */
- NVOBJ_CLASS(dev, 0x0038, GR);
-
- /* m2mf */
- NVOBJ_CLASS(dev, 0x0039, GR);
-
- /* nv03 gdirect */
- NVOBJ_CLASS(dev, 0x004b, GR);
- NVOBJ_MTHD (dev, 0x004b, 0x0184, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x004b, 0x0188, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x004b, 0x018c, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x004b, 0x0190, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x004b, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 gdirect */
- NVOBJ_CLASS(dev, 0x004a, GR);
- NVOBJ_MTHD (dev, 0x004a, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x004a, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x004a, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x004a, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x004a, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x004a, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv01 imageblit */
- NVOBJ_CLASS(dev, 0x001f, GR);
- NVOBJ_MTHD (dev, 0x001f, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x001f, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x001f, 0x018c, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x001f, 0x0190, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x001f, 0x0194, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x001f, 0x0198, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x001f, 0x019c, nv04_graph_mthd_bind_surf_src);
- NVOBJ_MTHD (dev, 0x001f, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 imageblit */
- NVOBJ_CLASS(dev, 0x005f, GR);
- NVOBJ_MTHD (dev, 0x005f, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x005f, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x005f, 0x018c, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x005f, 0x0190, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x005f, 0x0194, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x005f, 0x0198, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x005f, 0x019c, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x005f, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 iifc */
- NVOBJ_CLASS(dev, 0x0060, GR);
- NVOBJ_MTHD (dev, 0x0060, 0x0188, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0060, 0x018c, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x0060, 0x0190, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x0060, 0x0194, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0060, 0x0198, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0060, 0x019c, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x0060, 0x01a0, nv04_graph_mthd_bind_surf2d_swzsurf);
- NVOBJ_MTHD (dev, 0x0060, 0x03e4, nv04_graph_mthd_set_operation);
-
- /* nv05 iifc */
- NVOBJ_CLASS(dev, 0x0064, GR);
-
- /* nv01 ifc */
- NVOBJ_CLASS(dev, 0x0021, GR);
- NVOBJ_MTHD (dev, 0x0021, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0021, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x0021, 0x018c, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x0021, 0x0190, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0021, 0x0194, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0021, 0x0198, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x0021, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 ifc */
- NVOBJ_CLASS(dev, 0x0061, GR);
- NVOBJ_MTHD (dev, 0x0061, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0061, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x0061, 0x018c, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x0061, 0x0190, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0061, 0x0194, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0061, 0x0198, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x0061, 0x019c, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x0061, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv05 ifc */
- NVOBJ_CLASS(dev, 0x0065, GR);
-
- /* nv03 sifc */
- NVOBJ_CLASS(dev, 0x0036, GR);
- NVOBJ_MTHD (dev, 0x0036, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0036, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x0036, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0036, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0036, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x0036, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 sifc */
- NVOBJ_CLASS(dev, 0x0076, GR);
- NVOBJ_MTHD (dev, 0x0076, 0x0184, nv04_graph_mthd_bind_chroma);
- NVOBJ_MTHD (dev, 0x0076, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x0076, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0076, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0076, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x0076, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x0076, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv05 sifc */
- NVOBJ_CLASS(dev, 0x0066, GR);
-
- /* nv03 sifm */
- NVOBJ_CLASS(dev, 0x0037, GR);
- NVOBJ_MTHD (dev, 0x0037, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x0037, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0037, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0037, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x0037, 0x0304, nv04_graph_mthd_set_operation);
-
- /* nv04 sifm */
- NVOBJ_CLASS(dev, 0x0077, GR);
- NVOBJ_MTHD (dev, 0x0077, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x0077, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x0077, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x0077, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x0077, 0x0198, nv04_graph_mthd_bind_surf2d_swzsurf);
- NVOBJ_MTHD (dev, 0x0077, 0x0304, nv04_graph_mthd_set_operation);
-
- /* null */
- NVOBJ_CLASS(dev, 0x0030, GR);
-
- /* surf2d */
- NVOBJ_CLASS(dev, 0x0042, GR);
-
- /* rop */
- NVOBJ_CLASS(dev, 0x0043, GR);
-
- /* beta1 */
- NVOBJ_CLASS(dev, 0x0012, GR);
-
- /* beta4 */
- NVOBJ_CLASS(dev, 0x0072, GR);
-
- /* cliprect */
- NVOBJ_CLASS(dev, 0x0019, GR);
-
- /* nv01 pattern */
- NVOBJ_CLASS(dev, 0x0018, GR);
-
- /* nv04 pattern */
- NVOBJ_CLASS(dev, 0x0044, GR);
-
- /* swzsurf */
- NVOBJ_CLASS(dev, 0x0052, GR);
-
- /* surf3d */
- NVOBJ_CLASS(dev, 0x0053, GR);
- NVOBJ_MTHD (dev, 0x0053, 0x02f8, nv04_graph_mthd_surf3d_clip_h);
- NVOBJ_MTHD (dev, 0x0053, 0x02fc, nv04_graph_mthd_surf3d_clip_v);
-
- /* nv03 tex_tri */
- NVOBJ_CLASS(dev, 0x0048, GR);
- NVOBJ_MTHD (dev, 0x0048, 0x0188, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x0048, 0x018c, nv04_graph_mthd_bind_surf_color);
- NVOBJ_MTHD (dev, 0x0048, 0x0190, nv04_graph_mthd_bind_surf_zeta);
-
- /* tex_tri */
- NVOBJ_CLASS(dev, 0x0054, GR);
-
- /* multitex_tri */
- NVOBJ_CLASS(dev, 0x0055, GR);
-
- /* nv01 chroma */
- NVOBJ_CLASS(dev, 0x0017, GR);
-
- /* nv04 chroma */
- NVOBJ_CLASS(dev, 0x0057, GR);
-
- /* surf_dst */
- NVOBJ_CLASS(dev, 0x0058, GR);
-
- /* surf_src */
- NVOBJ_CLASS(dev, 0x0059, GR);
-
- /* surf_color */
- NVOBJ_CLASS(dev, 0x005a, GR);
-
- /* surf_zeta */
- NVOBJ_CLASS(dev, 0x005b, GR);
-
- /* nv01 line */
- NVOBJ_CLASS(dev, 0x001c, GR);
- NVOBJ_MTHD (dev, 0x001c, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x001c, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x001c, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x001c, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x001c, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x001c, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 line */
- NVOBJ_CLASS(dev, 0x005c, GR);
- NVOBJ_MTHD (dev, 0x005c, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x005c, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x005c, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x005c, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x005c, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x005c, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x005c, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv01 tri */
- NVOBJ_CLASS(dev, 0x001d, GR);
- NVOBJ_MTHD (dev, 0x001d, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x001d, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x001d, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x001d, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x001d, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x001d, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 tri */
- NVOBJ_CLASS(dev, 0x005d, GR);
- NVOBJ_MTHD (dev, 0x005d, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x005d, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x005d, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x005d, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x005d, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x005d, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x005d, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv01 rect */
- NVOBJ_CLASS(dev, 0x001e, GR);
- NVOBJ_MTHD (dev, 0x001e, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x001e, 0x0188, nv04_graph_mthd_bind_nv01_patt);
- NVOBJ_MTHD (dev, 0x001e, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x001e, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x001e, 0x0194, nv04_graph_mthd_bind_surf_dst);
- NVOBJ_MTHD (dev, 0x001e, 0x02fc, nv04_graph_mthd_set_operation);
-
- /* nv04 rect */
- NVOBJ_CLASS(dev, 0x005e, GR);
- NVOBJ_MTHD (dev, 0x005e, 0x0184, nv04_graph_mthd_bind_clip);
- NVOBJ_MTHD (dev, 0x005e, 0x0188, nv04_graph_mthd_bind_nv04_patt);
- NVOBJ_MTHD (dev, 0x005e, 0x018c, nv04_graph_mthd_bind_rop);
- NVOBJ_MTHD (dev, 0x005e, 0x0190, nv04_graph_mthd_bind_beta1);
- NVOBJ_MTHD (dev, 0x005e, 0x0194, nv04_graph_mthd_bind_beta4);
- NVOBJ_MTHD (dev, 0x005e, 0x0198, nv04_graph_mthd_bind_surf2d);
- NVOBJ_MTHD (dev, 0x005e, 0x02fc, nv04_graph_mthd_set_operation);
-
- return 0;
-}
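
The long comment in the deleted nv04_graph.c explains why NV04 needs software methods at all: the NV05-style "valid" bits are emulated in the grobj's otherwise-unused fourth word, and the patch-valid bit (ctx1 bit 24) is recomputed every time a bind or set-operation method arrives. As a reading aid, the sketch below restates that validity rule as a pure host-side function; the checks mirror nv04_graph_set_ctx_val() above, but the function name and the example calls are illustrative additions, not driver code.

```c
/* Restatement of the patch-validity rule from the removed
 * nv04_graph_set_ctx_val(): given the emulated valid bits kept in the
 * grobj's spare word, the object class and the selected 2d operation,
 * decide whether rendering may be enabled (ctx1 bit 24). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool nv04_patch_valid(uint32_t ctx_val, int class, int op)
{
	/* a surf2d/surf_dst/surf_color object must always be bound */
	if (!(ctx_val & 0x02000000))
		return false;
	/* nv01 imageblit (0x1f) and nv03 tex_tri (0x48) also need surf_src/surf_zeta */
	if ((class == 0x1f || class == 0x48) && !(ctx_val & 0x04000000))
		return false;

	switch (op) {
	case 0: case 3:		/* SRCCOPY_AND, SRCCOPY: nothing extra	*/
		return true;
	case 1:			/* ROP_AND: pattern/rop			*/
		return (ctx_val & 0x18000000) != 0;
	case 2:			/* BLEND_AND: beta1			*/
		return (ctx_val & 0x20000000) != 0;
	case 4: case 5:		/* SRCCOPY_PREMULT, BLEND_PREMULT: beta4 */
		return (ctx_val & 0x40000000) != 0;
	default:
		return false;
	}
}

int main(void)
{
	/* nv04 gdirect (0x4a), ROP_AND, surf2d bound, rop not yet bound */
	printf("%d\n", nv04_patch_valid(0x02000000, 0x4a, 1));			/* 0 */
	/* same object once a rop has been bound as well */
	printf("%d\n", nv04_patch_valid(0x02000000 | 0x10000000, 0x4a, 1));	/* 1 */
	return 0;
}
```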
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-
-/* returns the size of fifo context */
-static int
-nouveau_fifo_ctx_size(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (dev_priv->chipset >= 0x40)
- return 128 * 32;
- else
- if (dev_priv->chipset >= 0x17)
- return 64 * 32;
- else
- if (dev_priv->chipset >= 0x10)
- return 32 * 32;
-
- return 32 * 16;
-}
-
-int nv04_instmem_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramht = NULL;
- u32 offset, length;
- int ret;
-
- /* RAMIN always available */
- dev_priv->ramin_available = true;
-
- /* Reserve space at end of VRAM for PRAMIN */
- if (dev_priv->card_type >= NV_40) {
- u32 vs = hweight8((nv_rd32(dev, 0x001540) & 0x0000ff00) >> 8);
- u32 rsvd;
-
- /* estimate grctx size, the magics come from nv40_grctx.c */
- if (dev_priv->chipset == 0x40) rsvd = 0x6aa0 * vs;
- else if (dev_priv->chipset < 0x43) rsvd = 0x4f00 * vs;
- else if (nv44_graph_class(dev)) rsvd = 0x4980 * vs;
- else rsvd = 0x4a40 * vs;
- rsvd += 16 * 1024;
- rsvd *= 32; /* per-channel */
-
- rsvd += 512 * 1024; /* pci(e)gart table */
- rsvd += 512 * 1024; /* object storage */
-
- dev_priv->ramin_rsvd_vram = round_up(rsvd, 4096);
- } else {
- dev_priv->ramin_rsvd_vram = 512 * 1024;
- }
-
- /* Setup shared RAMHT */
- ret = nouveau_gpuobj_new_fake(dev, 0x10000, ~0, 4096,
- NVOBJ_FLAG_ZERO_ALLOC, &ramht);
- if (ret)
- return ret;
-
- ret = nouveau_ramht_new(dev, ramht, &dev_priv->ramht);
- nouveau_gpuobj_ref(NULL, &ramht);
- if (ret)
- return ret;
-
- /* And RAMRO */
- ret = nouveau_gpuobj_new_fake(dev, 0x11200, ~0, 512,
- NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramro);
- if (ret)
- return ret;
-
- /* And RAMFC */
- length = nouveau_fifo_ctx_size(dev);
- switch (dev_priv->card_type) {
- case NV_40:
- offset = 0x20000;
- break;
- default:
- offset = 0x11400;
- break;
- }
-
- ret = nouveau_gpuobj_new_fake(dev, offset, ~0, length,
- NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->ramfc);
- if (ret)
- return ret;
-
- /* Only allow space after RAMFC to be used for object allocation */
- offset += length;
-
- /* It appears RAMRO (or something?) is controlled by 0x2220/0x2230
- * on certain NV4x chipsets as well as RAMFC. When 0x2230 == 0
- * ("new style" control) the upper 16-bits of 0x2220 points at this
- * other mysterious table that's clobbering important things.
- *
- * We're now pointing this at RAMIN+0x30000 to avoid RAMFC getting
- * smashed to pieces on us, so reserve 0x30000-0x40000 too..
- */
- if (dev_priv->card_type >= NV_40) {
- if (offset < 0x40000)
- offset = 0x40000;
- }
-
- ret = drm_mm_init(&dev_priv->ramin_heap, offset,
- dev_priv->ramin_rsvd_vram - offset);
- if (ret) {
- NV_ERROR(dev, "Failed to init RAMIN heap: %d\n", ret);
- return ret;
- }
-
- return 0;
-}
-
-void
-nv04_instmem_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- nouveau_ramht_ref(NULL, &dev_priv->ramht, NULL);
- nouveau_gpuobj_ref(NULL, &dev_priv->ramro);
- nouveau_gpuobj_ref(NULL, &dev_priv->ramfc);
-
- if (drm_mm_initialized(&dev_priv->ramin_heap))
- drm_mm_takedown(&dev_priv->ramin_heap);
-}
-
-int
-nv04_instmem_suspend(struct drm_device *dev)
-{
- return 0;
-}
-
-void
-nv04_instmem_resume(struct drm_device *dev)
-{
-}
-
-int
-nv04_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
- u32 size, u32 align)
-{
- struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
- struct drm_mm_node *ramin = NULL;
-
- do {
- if (drm_mm_pre_get(&dev_priv->ramin_heap))
- return -ENOMEM;
-
- spin_lock(&dev_priv->ramin_lock);
- ramin = drm_mm_search_free(&dev_priv->ramin_heap, size, align, 0);
- if (ramin == NULL) {
- spin_unlock(&dev_priv->ramin_lock);
- return -ENOMEM;
- }
-
- ramin = drm_mm_get_block_atomic(ramin, size, align);
- spin_unlock(&dev_priv->ramin_lock);
- } while (ramin == NULL);
-
- gpuobj->node = ramin;
- gpuobj->vinst = ramin->start;
- return 0;
-}
-
-void
-nv04_instmem_put(struct nouveau_gpuobj *gpuobj)
-{
- struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
-
- spin_lock(&dev_priv->ramin_lock);
- drm_mm_put_block(gpuobj->node);
- gpuobj->node = NULL;
- spin_unlock(&dev_priv->ramin_lock);
-}
-
-int
-nv04_instmem_map(struct nouveau_gpuobj *gpuobj)
-{
- gpuobj->pinst = gpuobj->vinst;
- return 0;
-}
-
-void
-nv04_instmem_unmap(struct nouveau_gpuobj *gpuobj)
-{
-}
-
-void
-nv04_instmem_flush(struct drm_device *dev)
-{
-}
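
The NV40+ branch of the deleted nv04_instmem_init() sizes the end-of-VRAM PRAMIN reservation from a per-shader-unit graph-context estimate, then adds fixed costs and rounds up to a page. The arithmetic is easier to sanity-check in isolation, so the sketch below reruns it on the host; the shader-unit count passed in (4) is an assumed example rather than a value read from any board, and round_up_pot() is a local stand-in for the kernel's round_up().

```c
/* Host-side rerun of the PRAMIN reservation estimate from the removed
 * nv04_instmem_init() (NV40+ path).  Constants are the ones in the hunk
 * above; the vs argument is an assumed example value. */
#include <stdint.h>
#include <stdio.h>

static uint32_t round_up_pot(uint32_t v, uint32_t a)	/* a must be a power of two */
{
	return (v + a - 1) & ~(a - 1);
}

static uint32_t nv40_ramin_reserve(int chipset, int vs, int is_nv44_class)
{
	uint32_t rsvd;

	/* per-unit grctx size estimates, magics from nv40_grctx.c */
	if (chipset == 0x40)		rsvd = 0x6aa0 * vs;
	else if (chipset < 0x43)	rsvd = 0x4f00 * vs;
	else if (is_nv44_class)		rsvd = 0x4980 * vs;
	else				rsvd = 0x4a40 * vs;
	rsvd += 16 * 1024;
	rsvd *= 32;			/* per-channel		*/

	rsvd += 512 * 1024;		/* pci(e)gart table	*/
	rsvd += 512 * 1024;		/* object storage	*/

	return round_up_pot(rsvd, 4096);
}

int main(void)
{
	/* assumed example: chipset 0x40 reporting 4 shader units */
	printf("%u KiB reserved for PRAMIN\n", nv40_ramin_reserve(0x40, 4, 0) >> 10);
	return 0;
}
```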
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-int
-nv04_mc_init(struct drm_device *dev)
-{
- /* Power up everything, resetting each individual unit will
- * be done later if needed.
- */
-
- nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
-
- /* Disable PROM access. */
- nv_wr32(dev, NV_PBUS_PCI_NV_20, NV_PBUS_PCI_NV_20_ROM_SHADOW_ENABLED);
-
- return 0;
-}
-
-void
-nv04_mc_takedown(struct drm_device *dev)
-{
-}
#include "drmP.h"
#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_fence.h"
#include "nouveau_software.h"
#include "nouveau_hw.h"
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-#include "nouveau_hw.h"
-
-int
-nv04_timer_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 m, n, d;
-
- nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
- nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
-
- /* aim for 31.25MHz, which gives us nanosecond timestamps */
- d = 1000000 / 32;
-
- /* determine base clock for timer source */
- if (dev_priv->chipset < 0x40) {
- n = nouveau_hw_get_clock(dev, PLL_CORE);
- } else
- if (dev_priv->chipset == 0x40) {
- /*XXX: figure this out */
- n = 0;
- } else {
- n = dev_priv->crystal;
- m = 1;
- while (n < (d * 2)) {
- n += (n / m);
- m++;
- }
-
- nv_wr32(dev, 0x009220, m - 1);
- }
-
- if (!n) {
- NV_WARN(dev, "PTIMER: unknown input clock freq\n");
- if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
- !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
- nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1);
- nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1);
- }
- return 0;
- }
-
- /* reduce ratio to acceptable values */
- while (((n % 5) == 0) && ((d % 5) == 0)) {
- n /= 5;
- d /= 5;
- }
-
- while (((n % 2) == 0) && ((d % 2) == 0)) {
- n /= 2;
- d /= 2;
- }
-
- while (n > 0xffff || d > 0xffff) {
- n >>= 1;
- d >>= 1;
- }
-
- nv_wr32(dev, NV04_PTIMER_NUMERATOR, n);
- nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d);
- return 0;
-}
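
The numerator/denominator reduction above is plain integer arithmetic. Purely as an illustration (the input clock value is made up), the same three loops can be run standalone:

/* Standalone demo of the n/d reduction above; the input clock is made up. */
#include <stdio.h>

int main(void)
{
	unsigned n = 27000000;		/* hypothetical input clock          */
	unsigned d = 1000000 / 32;	/* target ratio denominator as above */

	while ((n % 5) == 0 && (d % 5) == 0) { n /= 5; d /= 5; }
	while ((n % 2) == 0 && (d % 2) == 0) { n /= 2; d /= 2; }
	while (n > 0xffff || d > 0xffff)     { n >>= 1; d >>= 1; }

	printf("PTIMER numerator %u, denominator %u\n", n, d);
	return 0;
}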
-
-u64
-nv04_timer_read(struct drm_device *dev)
-{
- u32 hi, lo;
-
- do {
- hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
- lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
- } while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));
-
- return ((u64)hi << 32 | lo);
-}
-
-void
-nv04_timer_takedown(struct drm_device *dev)
-{
-}
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-void
-nv10_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch, uint32_t flags)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- tile->addr = 0x80000000 | addr;
- tile->limit = max(1u, addr + size) - 1;
- tile->pitch = pitch;
-}
-
-void
-nv10_fb_free_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
-}
-
-void
-nv10_fb_set_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
-}
-
-int
-nv1a_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct pci_dev *bridge;
- uint32_t mem, mib;
-
- bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 1));
- if (!bridge) {
- NV_ERROR(dev, "no bridge device\n");
- return 0;
- }
-
- if (dev_priv->chipset == 0x1a) {
- pci_read_config_dword(bridge, 0x7c, &mem);
- mib = ((mem >> 6) & 31) + 1;
- } else {
- pci_read_config_dword(bridge, 0x84, &mem);
- mib = ((mem >> 4) & 127) + 1;
- }
-
- dev_priv->vram_size = mib * 1024 * 1024;
- return 0;
-}
-
-int
-nv10_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 fifo_data = nv_rd32(dev, NV04_PFB_FIFO_DATA);
- u32 cfg0 = nv_rd32(dev, 0x100200);
-
- dev_priv->vram_size = fifo_data & NV10_PFB_FIFO_DATA_RAM_AMOUNT_MB_MASK;
-
- if (cfg0 & 0x00000001)
- dev_priv->vram_type = NV_MEM_TYPE_DDR1;
- else
- dev_priv->vram_type = NV_MEM_TYPE_SDRAM;
-
- return 0;
-}
-
-int
-nv10_fb_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- int i;
-
- /* Turn all the tiling regions off. */
- pfb->num_tiles = NV10_PFB_TILE__SIZE;
- for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_tile_region(dev, i);
-
- return 0;
-}
-
-void
-nv10_fb_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- int i;
-
- for (i = 0; i < pfb->num_tiles; i++)
- pfb->free_tile_region(dev, i);
-}
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_fence.h"
struct nv10_fence_chan {
+++ /dev/null
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv10_ramfc[] = {
- { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
- { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
- { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
- { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
- { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
- { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
- { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
- { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
- { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
- {}
-};
-
-struct nv10_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
-};
-
-struct nv10_fifo_chan {
- struct nouveau_fifo_chan base;
- struct nouveau_gpuobj *ramfc;
-};
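
The ramfc_desc table above only becomes meaningful next to its consumer in nv04_fifo.c, which is not part of this file: each entry names a field of `bits` width at shift `ctxs` inside RAMFC dword `ctxp`, mirroring bits at shift `regs` of PFIFO register `regp`. The helper below (name hypothetical) is a sketch of one plausible way such an entry is applied when a channel context is saved, not the driver's actual implementation.

/* Sketch only: one plausible way a ramfc_desc entry is applied when a
 * channel context is saved back into RAMFC.  The table's real consumer
 * lives in nv04_fifo.c, which is not part of this file, so treat this as
 * an illustration of the field meanings rather than the driver's code. */
static void
ramfc_save_field(struct drm_device *dev, struct nouveau_gpuobj *ramfc,
		 const struct ramfc_desc *c)
{
	u32 mask = (c->bits == 32) ? 0xffffffff : ((1u << c->bits) - 1);
	u32 val  = (nv_rd32(dev, c->regp) >> c->regs) & mask;
	u32 word = nv_ro32(ramfc, c->ctxp);

	word &= ~(mask << c->ctxs);		/* clear the target field    */
	word |=  (val  << c->ctxs);		/* insert the register field */
	nv_wo32(ramfc, c->ctxp, word);
}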
-
-static int
-nv10_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv10_fifo_priv *priv = nv_engine(dev, engine);
- struct nv10_fifo_chan *fctx;
- unsigned long flags;
- int ret;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* initialise default fifo context */
- ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
- chan->id * 32, ~0, 32,
- NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
- if (ret)
- goto error;
-
- nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
- nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
- nv_wo32(fctx->ramfc, 0x08, 0x00000000);
- nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
- nv_wo32(fctx->ramfc, 0x10, 0x00000000);
- nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
- nv_wo32(fctx->ramfc, 0x18, 0x00000000);
- nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
-
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-int
-nv10_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv10_fifo_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv04_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv10_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 31;
- priv->ramfc_desc = nv10_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- return 0;
-}
+++ /dev/null
-/*
- * Copyright (C) 2009 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_hw.h"
-#include "nouveau_gpio.h"
-
-int
-nv10_gpio_sense(struct drm_device *dev, int line)
-{
- if (line < 2) {
- line = line * 16;
- line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO) >> line;
- return !!(line & 0x0100);
- } else
- if (line < 10) {
- line = (line - 2) * 4;
- line = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT) >> line;
- return !!(line & 0x04);
- } else
- if (line < 14) {
- line = (line - 10) * 4;
- line = NVReadCRTC(dev, 0, NV_PCRTC_850) >> line;
- return !!(line & 0x04);
- }
-
- return -EINVAL;
-}
-
-int
-nv10_gpio_drive(struct drm_device *dev, int line, int dir, int out)
-{
- u32 reg, mask, data;
-
- if (line < 2) {
- line = line * 16;
- reg = NV_PCRTC_GPIO;
- mask = 0x00000011;
- data = (dir << 4) | out;
- } else
- if (line < 10) {
- line = (line - 2) * 4;
- reg = NV_PCRTC_GPIO_EXT;
- mask = 0x00000003;
- data = (dir << 1) | out;
- } else
- if (line < 14) {
- line = (line - 10) * 4;
- reg = NV_PCRTC_850;
- mask = 0x00000003;
- data = (dir << 1) | out;
- } else {
- return -EINVAL;
- }
-
- mask = NVReadCRTC(dev, 0, reg) & ~(mask << line);
- NVWriteCRTC(dev, 0, reg, mask | (data << line));
- return 0;
-}
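
The line-to-register mapping shared by nv10_gpio_sense() and nv10_gpio_drive() above can be tabulated directly; this standalone loop just prints which CRTC register and bit position each of the 14 lines lands in.

/* Standalone table of the GPIO line -> register/shift mapping used above. */
#include <stdio.h>

int main(void)
{
	int line;

	for (line = 0; line < 14; line++) {
		const char *reg;
		int shift;

		if (line < 2) {
			reg = "NV_PCRTC_GPIO";     shift = line * 16;
		} else if (line < 10) {
			reg = "NV_PCRTC_GPIO_EXT"; shift = (line - 2) * 4;
		} else {
			reg = "NV_PCRTC_850";      shift = (line - 10) * 4;
		}
		printf("line %2d -> %-18s bit %2d\n", line, reg, shift);
	}
	return 0;
}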
-
-void
-nv10_gpio_irq_enable(struct drm_device *dev, int line, bool on)
-{
- u32 mask = 0x00010001 << line;
-
- nv_wr32(dev, 0x001104, mask);
- nv_mask(dev, 0x001144, mask, on ? mask : 0);
-}
-
-static void
-nv10_gpio_isr(struct drm_device *dev)
-{
- u32 intr = nv_rd32(dev, 0x1104);
- u32 hi = (intr & 0x0000ffff) >> 0;
- u32 lo = (intr & 0xffff0000) >> 16;
-
- nouveau_gpio_isr(dev, 0, hi | lo);
-
- nv_wr32(dev, 0x001104, intr);
-}
-
-int
-nv10_gpio_init(struct drm_device *dev)
-{
- nv_wr32(dev, 0x001140, 0x00000000);
- nv_wr32(dev, 0x001100, 0xffffffff);
- nv_wr32(dev, 0x001144, 0x00000000);
- nv_wr32(dev, 0x001104, 0xffffffff);
- nouveau_irq_register(dev, 28, nv10_gpio_isr); /* PBUS */
- return 0;
-}
-
-void
-nv10_gpio_fini(struct drm_device *dev)
-{
- nv_wr32(dev, 0x001140, 0x00000000);
- nv_wr32(dev, 0x001144, 0x00000000);
- nouveau_irq_unregister(dev, 28);
-}
+++ /dev/null
-/*
- * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-
-struct nv10_graph_engine {
- struct nouveau_exec_engine base;
-};
-
-struct pipe_state {
- uint32_t pipe_0x0000[0x040/4];
- uint32_t pipe_0x0040[0x010/4];
- uint32_t pipe_0x0200[0x0c0/4];
- uint32_t pipe_0x4400[0x080/4];
- uint32_t pipe_0x6400[0x3b0/4];
- uint32_t pipe_0x6800[0x2f0/4];
- uint32_t pipe_0x6c00[0x030/4];
- uint32_t pipe_0x7000[0x130/4];
- uint32_t pipe_0x7400[0x0c0/4];
- uint32_t pipe_0x7800[0x0c0/4];
-};
-
-static int nv10_graph_ctx_regs[] = {
- NV10_PGRAPH_CTX_SWITCH(0),
- NV10_PGRAPH_CTX_SWITCH(1),
- NV10_PGRAPH_CTX_SWITCH(2),
- NV10_PGRAPH_CTX_SWITCH(3),
- NV10_PGRAPH_CTX_SWITCH(4),
- NV10_PGRAPH_CTX_CACHE(0, 0),
- NV10_PGRAPH_CTX_CACHE(0, 1),
- NV10_PGRAPH_CTX_CACHE(0, 2),
- NV10_PGRAPH_CTX_CACHE(0, 3),
- NV10_PGRAPH_CTX_CACHE(0, 4),
- NV10_PGRAPH_CTX_CACHE(1, 0),
- NV10_PGRAPH_CTX_CACHE(1, 1),
- NV10_PGRAPH_CTX_CACHE(1, 2),
- NV10_PGRAPH_CTX_CACHE(1, 3),
- NV10_PGRAPH_CTX_CACHE(1, 4),
- NV10_PGRAPH_CTX_CACHE(2, 0),
- NV10_PGRAPH_CTX_CACHE(2, 1),
- NV10_PGRAPH_CTX_CACHE(2, 2),
- NV10_PGRAPH_CTX_CACHE(2, 3),
- NV10_PGRAPH_CTX_CACHE(2, 4),
- NV10_PGRAPH_CTX_CACHE(3, 0),
- NV10_PGRAPH_CTX_CACHE(3, 1),
- NV10_PGRAPH_CTX_CACHE(3, 2),
- NV10_PGRAPH_CTX_CACHE(3, 3),
- NV10_PGRAPH_CTX_CACHE(3, 4),
- NV10_PGRAPH_CTX_CACHE(4, 0),
- NV10_PGRAPH_CTX_CACHE(4, 1),
- NV10_PGRAPH_CTX_CACHE(4, 2),
- NV10_PGRAPH_CTX_CACHE(4, 3),
- NV10_PGRAPH_CTX_CACHE(4, 4),
- NV10_PGRAPH_CTX_CACHE(5, 0),
- NV10_PGRAPH_CTX_CACHE(5, 1),
- NV10_PGRAPH_CTX_CACHE(5, 2),
- NV10_PGRAPH_CTX_CACHE(5, 3),
- NV10_PGRAPH_CTX_CACHE(5, 4),
- NV10_PGRAPH_CTX_CACHE(6, 0),
- NV10_PGRAPH_CTX_CACHE(6, 1),
- NV10_PGRAPH_CTX_CACHE(6, 2),
- NV10_PGRAPH_CTX_CACHE(6, 3),
- NV10_PGRAPH_CTX_CACHE(6, 4),
- NV10_PGRAPH_CTX_CACHE(7, 0),
- NV10_PGRAPH_CTX_CACHE(7, 1),
- NV10_PGRAPH_CTX_CACHE(7, 2),
- NV10_PGRAPH_CTX_CACHE(7, 3),
- NV10_PGRAPH_CTX_CACHE(7, 4),
- NV10_PGRAPH_CTX_USER,
- NV04_PGRAPH_DMA_START_0,
- NV04_PGRAPH_DMA_START_1,
- NV04_PGRAPH_DMA_LENGTH,
- NV04_PGRAPH_DMA_MISC,
- NV10_PGRAPH_DMA_PITCH,
- NV04_PGRAPH_BOFFSET0,
- NV04_PGRAPH_BBASE0,
- NV04_PGRAPH_BLIMIT0,
- NV04_PGRAPH_BOFFSET1,
- NV04_PGRAPH_BBASE1,
- NV04_PGRAPH_BLIMIT1,
- NV04_PGRAPH_BOFFSET2,
- NV04_PGRAPH_BBASE2,
- NV04_PGRAPH_BLIMIT2,
- NV04_PGRAPH_BOFFSET3,
- NV04_PGRAPH_BBASE3,
- NV04_PGRAPH_BLIMIT3,
- NV04_PGRAPH_BOFFSET4,
- NV04_PGRAPH_BBASE4,
- NV04_PGRAPH_BLIMIT4,
- NV04_PGRAPH_BOFFSET5,
- NV04_PGRAPH_BBASE5,
- NV04_PGRAPH_BLIMIT5,
- NV04_PGRAPH_BPITCH0,
- NV04_PGRAPH_BPITCH1,
- NV04_PGRAPH_BPITCH2,
- NV04_PGRAPH_BPITCH3,
- NV04_PGRAPH_BPITCH4,
- NV10_PGRAPH_SURFACE,
- NV10_PGRAPH_STATE,
- NV04_PGRAPH_BSWIZZLE2,
- NV04_PGRAPH_BSWIZZLE5,
- NV04_PGRAPH_BPIXEL,
- NV10_PGRAPH_NOTIFY,
- NV04_PGRAPH_PATT_COLOR0,
- NV04_PGRAPH_PATT_COLOR1,
- NV04_PGRAPH_PATT_COLORRAM, /* 64 values from 0x400900 to 0x4009fc */
- 0x00400904,
- 0x00400908,
- 0x0040090c,
- 0x00400910,
- 0x00400914,
- 0x00400918,
- 0x0040091c,
- 0x00400920,
- 0x00400924,
- 0x00400928,
- 0x0040092c,
- 0x00400930,
- 0x00400934,
- 0x00400938,
- 0x0040093c,
- 0x00400940,
- 0x00400944,
- 0x00400948,
- 0x0040094c,
- 0x00400950,
- 0x00400954,
- 0x00400958,
- 0x0040095c,
- 0x00400960,
- 0x00400964,
- 0x00400968,
- 0x0040096c,
- 0x00400970,
- 0x00400974,
- 0x00400978,
- 0x0040097c,
- 0x00400980,
- 0x00400984,
- 0x00400988,
- 0x0040098c,
- 0x00400990,
- 0x00400994,
- 0x00400998,
- 0x0040099c,
- 0x004009a0,
- 0x004009a4,
- 0x004009a8,
- 0x004009ac,
- 0x004009b0,
- 0x004009b4,
- 0x004009b8,
- 0x004009bc,
- 0x004009c0,
- 0x004009c4,
- 0x004009c8,
- 0x004009cc,
- 0x004009d0,
- 0x004009d4,
- 0x004009d8,
- 0x004009dc,
- 0x004009e0,
- 0x004009e4,
- 0x004009e8,
- 0x004009ec,
- 0x004009f0,
- 0x004009f4,
- 0x004009f8,
- 0x004009fc,
- NV04_PGRAPH_PATTERN, /* 2 values from 0x400808 to 0x40080c */
- 0x0040080c,
- NV04_PGRAPH_PATTERN_SHAPE,
- NV03_PGRAPH_MONO_COLOR0,
- NV04_PGRAPH_ROP3,
- NV04_PGRAPH_CHROMA,
- NV04_PGRAPH_BETA_AND,
- NV04_PGRAPH_BETA_PREMULT,
- 0x00400e70,
- 0x00400e74,
- 0x00400e78,
- 0x00400e7c,
- 0x00400e80,
- 0x00400e84,
- 0x00400e88,
- 0x00400e8c,
- 0x00400ea0,
- 0x00400ea4,
- 0x00400ea8,
- 0x00400e90,
- 0x00400e94,
- 0x00400e98,
- 0x00400e9c,
- NV10_PGRAPH_WINDOWCLIP_HORIZONTAL, /* 8 values from 0x400f00-0x400f1c */
- NV10_PGRAPH_WINDOWCLIP_VERTICAL, /* 8 values from 0x400f20-0x400f3c */
- 0x00400f04,
- 0x00400f24,
- 0x00400f08,
- 0x00400f28,
- 0x00400f0c,
- 0x00400f2c,
- 0x00400f10,
- 0x00400f30,
- 0x00400f14,
- 0x00400f34,
- 0x00400f18,
- 0x00400f38,
- 0x00400f1c,
- 0x00400f3c,
- NV10_PGRAPH_XFMODE0,
- NV10_PGRAPH_XFMODE1,
- NV10_PGRAPH_GLOBALSTATE0,
- NV10_PGRAPH_GLOBALSTATE1,
- NV04_PGRAPH_STORED_FMT,
- NV04_PGRAPH_SOURCE_COLOR,
- NV03_PGRAPH_ABS_X_RAM, /* 32 values from 0x400400 to 0x40047c */
- NV03_PGRAPH_ABS_Y_RAM, /* 32 values from 0x400480 to 0x4004fc */
- 0x00400404,
- 0x00400484,
- 0x00400408,
- 0x00400488,
- 0x0040040c,
- 0x0040048c,
- 0x00400410,
- 0x00400490,
- 0x00400414,
- 0x00400494,
- 0x00400418,
- 0x00400498,
- 0x0040041c,
- 0x0040049c,
- 0x00400420,
- 0x004004a0,
- 0x00400424,
- 0x004004a4,
- 0x00400428,
- 0x004004a8,
- 0x0040042c,
- 0x004004ac,
- 0x00400430,
- 0x004004b0,
- 0x00400434,
- 0x004004b4,
- 0x00400438,
- 0x004004b8,
- 0x0040043c,
- 0x004004bc,
- 0x00400440,
- 0x004004c0,
- 0x00400444,
- 0x004004c4,
- 0x00400448,
- 0x004004c8,
- 0x0040044c,
- 0x004004cc,
- 0x00400450,
- 0x004004d0,
- 0x00400454,
- 0x004004d4,
- 0x00400458,
- 0x004004d8,
- 0x0040045c,
- 0x004004dc,
- 0x00400460,
- 0x004004e0,
- 0x00400464,
- 0x004004e4,
- 0x00400468,
- 0x004004e8,
- 0x0040046c,
- 0x004004ec,
- 0x00400470,
- 0x004004f0,
- 0x00400474,
- 0x004004f4,
- 0x00400478,
- 0x004004f8,
- 0x0040047c,
- 0x004004fc,
- NV03_PGRAPH_ABS_UCLIP_XMIN,
- NV03_PGRAPH_ABS_UCLIP_XMAX,
- NV03_PGRAPH_ABS_UCLIP_YMIN,
- NV03_PGRAPH_ABS_UCLIP_YMAX,
- 0x00400550,
- 0x00400558,
- 0x00400554,
- 0x0040055c,
- NV03_PGRAPH_ABS_UCLIPA_XMIN,
- NV03_PGRAPH_ABS_UCLIPA_XMAX,
- NV03_PGRAPH_ABS_UCLIPA_YMIN,
- NV03_PGRAPH_ABS_UCLIPA_YMAX,
- NV03_PGRAPH_ABS_ICLIP_XMAX,
- NV03_PGRAPH_ABS_ICLIP_YMAX,
- NV03_PGRAPH_XY_LOGIC_MISC0,
- NV03_PGRAPH_XY_LOGIC_MISC1,
- NV03_PGRAPH_XY_LOGIC_MISC2,
- NV03_PGRAPH_XY_LOGIC_MISC3,
- NV03_PGRAPH_CLIPX_0,
- NV03_PGRAPH_CLIPX_1,
- NV03_PGRAPH_CLIPY_0,
- NV03_PGRAPH_CLIPY_1,
- NV10_PGRAPH_COMBINER0_IN_ALPHA,
- NV10_PGRAPH_COMBINER1_IN_ALPHA,
- NV10_PGRAPH_COMBINER0_IN_RGB,
- NV10_PGRAPH_COMBINER1_IN_RGB,
- NV10_PGRAPH_COMBINER_COLOR0,
- NV10_PGRAPH_COMBINER_COLOR1,
- NV10_PGRAPH_COMBINER0_OUT_ALPHA,
- NV10_PGRAPH_COMBINER1_OUT_ALPHA,
- NV10_PGRAPH_COMBINER0_OUT_RGB,
- NV10_PGRAPH_COMBINER1_OUT_RGB,
- NV10_PGRAPH_COMBINER_FINAL0,
- NV10_PGRAPH_COMBINER_FINAL1,
- 0x00400e00,
- 0x00400e04,
- 0x00400e08,
- 0x00400e0c,
- 0x00400e10,
- 0x00400e14,
- 0x00400e18,
- 0x00400e1c,
- 0x00400e20,
- 0x00400e24,
- 0x00400e28,
- 0x00400e2c,
- 0x00400e30,
- 0x00400e34,
- 0x00400e38,
- 0x00400e3c,
- NV04_PGRAPH_PASSTHRU_0,
- NV04_PGRAPH_PASSTHRU_1,
- NV04_PGRAPH_PASSTHRU_2,
- NV10_PGRAPH_DIMX_TEXTURE,
- NV10_PGRAPH_WDIMX_TEXTURE,
- NV10_PGRAPH_DVD_COLORFMT,
- NV10_PGRAPH_SCALED_FORMAT,
- NV04_PGRAPH_MISC24_0,
- NV04_PGRAPH_MISC24_1,
- NV04_PGRAPH_MISC24_2,
- NV03_PGRAPH_X_MISC,
- NV03_PGRAPH_Y_MISC,
- NV04_PGRAPH_VALID1,
- NV04_PGRAPH_VALID2,
-};
-
-static int nv17_graph_ctx_regs[] = {
- NV10_PGRAPH_DEBUG_4,
- 0x004006b0,
- 0x00400eac,
- 0x00400eb0,
- 0x00400eb4,
- 0x00400eb8,
- 0x00400ebc,
- 0x00400ec0,
- 0x00400ec4,
- 0x00400ec8,
- 0x00400ecc,
- 0x00400ed0,
- 0x00400ed4,
- 0x00400ed8,
- 0x00400edc,
- 0x00400ee0,
- 0x00400a00,
- 0x00400a04,
-};
-
-struct graph_state {
- int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
- int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
- struct pipe_state pipe_state;
- uint32_t lma_window[4];
-};
-
-#define PIPE_SAVE(dev, state, addr) \
- do { \
- int __i; \
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
- for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
- state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
- } while (0)
-
-#define PIPE_RESTORE(dev, state, addr) \
- do { \
- int __i; \
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
- for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
- } while (0)
-
-static void nv10_graph_save_pipe(struct nouveau_channel *chan)
-{
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct pipe_state *pipe = &pgraph_ctx->pipe_state;
- struct drm_device *dev = chan->dev;
-
- PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
- PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
- PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
- PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
- PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
- PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
- PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
- PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
- PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
- PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
-}
-
-static void nv10_graph_load_pipe(struct nouveau_channel *chan)
-{
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct pipe_state *pipe = &pgraph_ctx->pipe_state;
- struct drm_device *dev = chan->dev;
- uint32_t xfmode0, xfmode1;
- int i;
-
- nouveau_wait_for_idle(dev);
- /* XXX check haiku comments */
- xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
- xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
- nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
- nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
- for (i = 0; i < 4; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- for (i = 0; i < 4; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
- for (i = 0; i < 3; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
- for (i = 0; i < 3; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
-
-
- PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
- nouveau_wait_for_idle(dev);
-
- /* restore XFMODE */
- nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
- nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
- PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
- PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
- PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
- PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
- PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
- PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
- PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
- PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
- PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
- nouveau_wait_for_idle(dev);
-}
-
-static void nv10_graph_create_pipe(struct nouveau_channel *chan)
-{
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
- struct drm_device *dev = chan->dev;
- uint32_t *fifo_pipe_state_addr;
- int i;
-#define PIPE_INIT(addr) \
- do { \
- fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
- } while (0)
-#define PIPE_INIT_END(addr) \
- do { \
- uint32_t *__end_addr = fifo_pipe_state->pipe_##addr + \
- ARRAY_SIZE(fifo_pipe_state->pipe_##addr); \
- if (fifo_pipe_state_addr != __end_addr) \
- NV_ERROR(dev, "incomplete pipe init for 0x%x : %p/%p\n", \
- addr, fifo_pipe_state_addr, __end_addr); \
- } while (0)
-#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
-
- PIPE_INIT(0x0200);
- for (i = 0; i < 48; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- PIPE_INIT_END(0x0200);
-
- PIPE_INIT(0x6400);
- for (i = 0; i < 211; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x3f800000);
- NV_WRITE_PIPE_INIT(0x40000000);
- NV_WRITE_PIPE_INIT(0x40000000);
- NV_WRITE_PIPE_INIT(0x40000000);
- NV_WRITE_PIPE_INIT(0x40000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x3f800000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x3f000000);
- NV_WRITE_PIPE_INIT(0x3f000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x3f800000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x3f800000);
- NV_WRITE_PIPE_INIT(0x3f800000);
- NV_WRITE_PIPE_INIT(0x3f800000);
- NV_WRITE_PIPE_INIT(0x3f800000);
- PIPE_INIT_END(0x6400);
-
- PIPE_INIT(0x6800);
- for (i = 0; i < 162; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x3f800000);
- for (i = 0; i < 25; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- PIPE_INIT_END(0x6800);
-
- PIPE_INIT(0x6c00);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0xbf800000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- PIPE_INIT_END(0x6c00);
-
- PIPE_INIT(0x7000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x7149f2ca);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x7149f2ca);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x7149f2ca);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x7149f2ca);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x7149f2ca);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x7149f2ca);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x7149f2ca);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x00000000);
- NV_WRITE_PIPE_INIT(0x7149f2ca);
- for (i = 0; i < 35; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- PIPE_INIT_END(0x7000);
-
- PIPE_INIT(0x7400);
- for (i = 0; i < 48; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- PIPE_INIT_END(0x7400);
-
- PIPE_INIT(0x7800);
- for (i = 0; i < 48; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- PIPE_INIT_END(0x7800);
-
- PIPE_INIT(0x4400);
- for (i = 0; i < 32; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- PIPE_INIT_END(0x4400);
-
- PIPE_INIT(0x0000);
- for (i = 0; i < 16; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- PIPE_INIT_END(0x0000);
-
- PIPE_INIT(0x0040);
- for (i = 0; i < 4; i++)
- NV_WRITE_PIPE_INIT(0x00000000);
- PIPE_INIT_END(0x0040);
-
-#undef PIPE_INIT
-#undef PIPE_INIT_END
-#undef NV_WRITE_PIPE_INIT
-}
-
-static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++) {
- if (nv10_graph_ctx_regs[i] == reg)
- return i;
- }
-	NV_ERROR(dev, "unknown offset nv10_ctx_regs %d\n", reg);
- return -1;
-}
-
-static int nv17_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
-{
- int i;
- for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++) {
- if (nv17_graph_ctx_regs[i] == reg)
- return i;
- }
-	NV_ERROR(dev, "unknown offset nv17_ctx_regs %d\n", reg);
- return -1;
-}
-
-static void nv10_graph_load_dma_vtxbuf(struct nouveau_channel *chan,
- uint32_t inst)
-{
- struct drm_device *dev = chan->dev;
- uint32_t st2, st2_dl, st2_dh, fifo_ptr, fifo[0x60/4];
- uint32_t ctx_user, ctx_switch[5];
- int i, subchan = -1;
-
- /* NV10TCL_DMA_VTXBUF (method 0x18c) modifies hidden state
- * that cannot be restored via MMIO. Do it through the FIFO
- * instead.
- */
-
- /* Look for a celsius object */
- for (i = 0; i < 8; i++) {
- int class = nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(i, 0)) & 0xfff;
-
- if (class == 0x56 || class == 0x96 || class == 0x99) {
- subchan = i;
- break;
- }
- }
-
- if (subchan < 0 || !inst)
- return;
-
- /* Save the current ctx object */
- ctx_user = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
- for (i = 0; i < 5; i++)
- ctx_switch[i] = nv_rd32(dev, NV10_PGRAPH_CTX_SWITCH(i));
-
- /* Save the FIFO state */
- st2 = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
- st2_dl = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DL);
- st2_dh = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2_DH);
- fifo_ptr = nv_rd32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR);
-
- for (i = 0; i < ARRAY_SIZE(fifo); i++)
- fifo[i] = nv_rd32(dev, 0x4007a0 + 4 * i);
-
- /* Switch to the celsius subchannel */
- for (i = 0; i < 5; i++)
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i),
- nv_rd32(dev, NV10_PGRAPH_CTX_CACHE(subchan, i)));
- nv_mask(dev, NV10_PGRAPH_CTX_USER, 0xe000, subchan << 13);
-
- /* Inject NV10TCL_DMA_VTXBUF */
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, 0);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2,
- 0x2c000000 | chan->id << 20 | subchan << 16 | 0x18c);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, inst);
- nv_mask(dev, NV10_PGRAPH_CTX_CONTROL, 0, 0x10000);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
- /* Restore the FIFO state */
- for (i = 0; i < ARRAY_SIZE(fifo); i++)
- nv_wr32(dev, 0x4007a0 + 4 * i, fifo[i]);
-
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_FIFO_PTR, fifo_ptr);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, st2);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DL, st2_dl);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2_DH, st2_dh);
-
- /* Restore the current ctx object */
- for (i = 0; i < 5; i++)
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(i), ctx_switch[i]);
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, ctx_user);
-}
-
-static int
-nv10_graph_load_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct graph_state *pgraph_ctx = chan->engctx[NVOBJ_ENGINE_GR];
- uint32_t tmp;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
- nv_wr32(dev, nv10_graph_ctx_regs[i], pgraph_ctx->nv10[i]);
- if (dev_priv->chipset >= 0x17) {
- for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
- nv_wr32(dev, nv17_graph_ctx_regs[i],
- pgraph_ctx->nv17[i]);
- }
-
- nv10_graph_load_pipe(chan);
- nv10_graph_load_dma_vtxbuf(chan, (nv_rd32(dev, NV10_PGRAPH_GLOBALSTATE1)
- & 0xffff));
-
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
- tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER);
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, (tmp & 0xffffff) | chan->id << 24);
- tmp = nv_rd32(dev, NV10_PGRAPH_FFINTFC_ST2);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, tmp & 0xcfffffff);
- return 0;
-}
-
-static int
-nv10_graph_unload_context(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- struct graph_state *ctx;
- uint32_t tmp;
- int i;
-
- chan = nv10_graph_channel(dev);
- if (!chan)
- return 0;
- ctx = chan->engctx[NVOBJ_ENGINE_GR];
-
- for (i = 0; i < ARRAY_SIZE(nv10_graph_ctx_regs); i++)
- ctx->nv10[i] = nv_rd32(dev, nv10_graph_ctx_regs[i]);
-
- if (dev_priv->chipset >= 0x17) {
- for (i = 0; i < ARRAY_SIZE(nv17_graph_ctx_regs); i++)
- ctx->nv17[i] = nv_rd32(dev, nv17_graph_ctx_regs[i]);
- }
-
- nv10_graph_save_pipe(chan);
-
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
- tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 31 << 24;
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
- return 0;
-}
-
-static void
-nv10_graph_context_switch(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
- int chid;
-
- nouveau_wait_for_idle(dev);
-
- /* If previous context is valid, we need to save it */
- nv10_graph_unload_context(dev);
-
- /* Load context for next channel */
- chid = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
- chan = dev_priv->channels.ptr[chid];
- if (chan && chan->engctx[NVOBJ_ENGINE_GR])
- nv10_graph_load_context(chan);
-}
-
-#define NV_WRITE_CTX(reg, val) do { \
- int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
- if (offset > 0) \
- pgraph_ctx->nv10[offset] = val; \
- } while (0)
-
-#define NV17_WRITE_CTX(reg, val) do { \
- int offset = nv17_graph_ctx_regs_find_offset(dev, reg); \
- if (offset > 0) \
- pgraph_ctx->nv17[offset] = val; \
- } while (0)
-
-struct nouveau_channel *
-nv10_graph_channel(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chid = 31;
-
- if (nv_rd32(dev, NV10_PGRAPH_CTX_CONTROL) & 0x00010000)
- chid = nv_rd32(dev, NV10_PGRAPH_CTX_USER) >> 24;
-
- if (chid >= 31)
- return NULL;
-
- return dev_priv->channels.ptr[chid];
-}
-
-static int
-nv10_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct graph_state *pgraph_ctx;
-
- NV_DEBUG(dev, "nv10_graph_context_create %d\n", chan->id);
-
- pgraph_ctx = kzalloc(sizeof(*pgraph_ctx), GFP_KERNEL);
- if (pgraph_ctx == NULL)
- return -ENOMEM;
- chan->engctx[engine] = pgraph_ctx;
-
- NV_WRITE_CTX(0x00400e88, 0x08000000);
- NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
- NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
- NV_WRITE_CTX(0x00400e10, 0x00001000);
- NV_WRITE_CTX(0x00400e14, 0x00001000);
- NV_WRITE_CTX(0x00400e30, 0x00080008);
- NV_WRITE_CTX(0x00400e34, 0x00080008);
- if (dev_priv->chipset >= 0x17) {
- /* is it really needed ??? */
- NV17_WRITE_CTX(NV10_PGRAPH_DEBUG_4,
- nv_rd32(dev, NV10_PGRAPH_DEBUG_4));
- NV17_WRITE_CTX(0x004006b0, nv_rd32(dev, 0x004006b0));
- NV17_WRITE_CTX(0x00400eac, 0x0fff0000);
- NV17_WRITE_CTX(0x00400eb0, 0x0fff0000);
- NV17_WRITE_CTX(0x00400ec0, 0x00000080);
- NV17_WRITE_CTX(0x00400ed0, 0x00000080);
- }
- NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
-
- nv10_graph_create_pipe(chan);
- return 0;
-}
-
-static void
-nv10_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct graph_state *pgraph_ctx = chan->engctx[engine];
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
- /* Unload the context if it's the currently active one */
- if (nv10_graph_channel(dev) == chan)
- nv10_graph_unload_context(dev);
-
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the context resources */
- chan->engctx[engine] = NULL;
- kfree(pgraph_ctx);
-}
-
-static void
-nv10_graph_set_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV10_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV10_PGRAPH_TILE(i), tile->addr);
-}
-
-static int
-nv10_graph_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 tmp;
- int i;
-
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
- ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
- NV_PMC_ENABLE_PGRAPH);
-
- nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
- /* nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x24E00810); */ /* 0x25f92ad9 */
- nv_wr32(dev, NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
- (1<<29) |
- (1<<31));
- if (dev_priv->chipset >= 0x17) {
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
- nv_wr32(dev, 0x400a10, 0x3ff3fb6);
- nv_wr32(dev, 0x400838, 0x2f8684);
- nv_wr32(dev, 0x40083c, 0x115f3f);
- nv_wr32(dev, 0x004006b0, 0x40000020);
- } else
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
-
- /* Turn all the tiling regions off. */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv10_graph_set_tile_region(dev, i);
-
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(0), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(1), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(2), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(3), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH(4), 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_STATE, 0xFFFFFFFF);
-
- tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 31 << 24;
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
- nv_wr32(dev, NV10_PGRAPH_FFINTFC_ST2, 0x08000000);
-
- return 0;
-}
-
-static int
-nv10_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- return -EBUSY;
- }
- nv10_graph_unload_context(dev);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
- return 0;
-}
-
-static int
-nv17_graph_mthd_lma_window(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct graph_state *ctx = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- struct pipe_state *pipe = &ctx->pipe_state;
- uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
- uint32_t xfmode0, xfmode1;
- int i;
-
- ctx->lma_window[(mthd - 0x1638) / 4] = data;
-
- if (mthd != 0x1644)
- return 0;
-
- nouveau_wait_for_idle(dev);
-
- PIPE_SAVE(dev, pipe_0x0040, 0x0040);
- PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
-
- PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
-
- nouveau_wait_for_idle(dev);
-
- xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
- xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
-
- PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
- PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
- PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
- PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
-
- nouveau_wait_for_idle(dev);
-
- nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
- nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
- for (i = 0; i < 4; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- for (i = 0; i < 4; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
- for (i = 0; i < 3; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
- for (i = 0; i < 3; i++)
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
-
- PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
-
- nouveau_wait_for_idle(dev);
-
- PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
-
- nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
- nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
-
- PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
- PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
- PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
- PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
-
- nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
- nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- nouveau_wait_for_idle(dev);
-
- return 0;
-}
-
-static int
-nv17_graph_mthd_lma_enable(struct nouveau_channel *chan,
- u32 class, u32 mthd, u32 data)
-{
- struct drm_device *dev = chan->dev;
-
- nouveau_wait_for_idle(dev);
-
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
- nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
- nv_wr32(dev, 0x004006b0,
- nv_rd32(dev, 0x004006b0) | 0x8 << 24);
-
- return 0;
-}
-
-struct nouveau_bitfield nv10_graph_intr[] = {
- { NV_PGRAPH_INTR_NOTIFY, "NOTIFY" },
- { NV_PGRAPH_INTR_ERROR, "ERROR" },
- {}
-};
-
-struct nouveau_bitfield nv10_graph_nstatus[] = {
- { NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
- { NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
- { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
- { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" },
- {}
-};
-
-static void
-nv10_graph_isr(struct drm_device *dev)
-{
- u32 stat;
-
- while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 chid = (addr & 0x01f00000) >> 20;
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
- u32 show = stat;
-
- if (stat & NV_PGRAPH_INTR_ERROR) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
- show &= ~NV_PGRAPH_INTR_ERROR;
- }
- }
-
- if (stat & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
- nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
- stat &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- show &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
- nv10_graph_context_switch(dev);
- }
-
- nv_wr32(dev, NV03_PGRAPH_INTR, stat);
- nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv10_graph_intr, show);
- printk(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
- nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
- }
- }
-}
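
The chid/subc/mthd decode in the ISR above is a fixed bit-slicing of NV04_PGRAPH_TRAPPED_ADDR; here it is run standalone on a made-up value.

/* Standalone demo of the TRAPPED_ADDR decode used in the ISR above. */
#include <stdio.h>

int main(void)
{
	unsigned addr = 0x00521184;		/* hypothetical TRAPPED_ADDR */
	unsigned chid = (addr & 0x01f00000) >> 20;
	unsigned subc = (addr & 0x00070000) >> 16;
	unsigned mthd = (addr & 0x00001ffc);

	printf("ch %u subc %u mthd 0x%04x\n", chid, subc, mthd);
	return 0;
}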
-
-static void
-nv10_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nv10_graph_engine *pgraph = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 12);
- kfree(pgraph);
-}
-
-int
-nv10_graph_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv10_graph_engine *pgraph;
-
- pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- pgraph->base.destroy = nv10_graph_destroy;
- pgraph->base.init = nv10_graph_init;
- pgraph->base.fini = nv10_graph_fini;
- pgraph->base.context_new = nv10_graph_context_new;
- pgraph->base.context_del = nv10_graph_context_del;
- pgraph->base.object_new = nv04_graph_object_new;
- pgraph->base.set_tile_region = nv10_graph_set_tile_region;
-
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- nouveau_irq_register(dev, 12, nv10_graph_isr);
-
- NVOBJ_CLASS(dev, 0x0030, GR); /* null */
- NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
- NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
- NVOBJ_CLASS(dev, 0x005f, GR); /* imageblit */
- NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
- NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
- NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
- NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
- NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
- NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
- NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
- NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
- NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
- NVOBJ_CLASS(dev, 0x0052, GR); /* swzsurf */
- NVOBJ_CLASS(dev, 0x0093, GR); /* surf3d */
- NVOBJ_CLASS(dev, 0x0094, GR); /* tex_tri */
- NVOBJ_CLASS(dev, 0x0095, GR); /* multitex_tri */
-
-	/* celsius */
- if (dev_priv->chipset <= 0x10) {
- NVOBJ_CLASS(dev, 0x0056, GR);
- } else
- if (dev_priv->chipset < 0x17 || dev_priv->chipset == 0x1a) {
- NVOBJ_CLASS(dev, 0x0096, GR);
- } else {
- NVOBJ_CLASS(dev, 0x0099, GR);
- NVOBJ_MTHD (dev, 0x0099, 0x1638, nv17_graph_mthd_lma_window);
- NVOBJ_MTHD (dev, 0x0099, 0x163c, nv17_graph_mthd_lma_window);
- NVOBJ_MTHD (dev, 0x0099, 0x1640, nv17_graph_mthd_lma_window);
- NVOBJ_MTHD (dev, 0x0099, 0x1644, nv17_graph_mthd_lma_window);
- NVOBJ_MTHD (dev, 0x0099, 0x1658, nv17_graph_mthd_lma_enable);
- }
-
- return 0;
-}
+++ /dev/null
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv17_ramfc[] = {
- { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
- { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
- { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
- { 16, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
- { 16, 16, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
- { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_STATE },
- { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
- { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_ENGINE },
- { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_PULL1 },
- { 32, 0, 0x20, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
- { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
- { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
- { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
- { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
- {}
-};
-
-struct nv17_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
-};
-
-struct nv17_fifo_chan {
- struct nouveau_fifo_chan base;
- struct nouveau_gpuobj *ramfc;
-};
-
-static int
-nv17_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv17_fifo_priv *priv = nv_engine(dev, engine);
- struct nv17_fifo_chan *fctx;
- unsigned long flags;
- int ret;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* initialise default fifo context */
- ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
- chan->id * 64, ~0, 64,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
- if (ret)
- goto error;
-
- nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
- nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
- nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
- nv_wo32(fctx->ramfc, 0x14, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
-
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-static int
-nv17_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv17_fifo_priv *priv = nv_engine(dev, engine);
- int i;
-
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
- nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
-
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->pinst >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
- nv_wr32(dev, NV03_PFIFO_RAMFC, 0x00010000 |
- dev_priv->ramfc->pinst >> 8);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
-
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-
- for (i = 0; i < priv->base.channels; i++) {
- if (dev_priv->channels.ptr[i])
- nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
- }
-
- return 0;
-}
-
-int
-nv17_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv17_fifo_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv17_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv17_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 31;
- priv->ramfc_desc = nv17_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- return 0;
-}
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_crtc.h"
-#include "nouveau_gpio.h"
+#include <subdev/gpio.h>
#include "nouveau_hw.h"
#include "nv17_tv.h"
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-static struct drm_mm_node *
-nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct drm_mm_node *mem;
- int ret;
-
- ret = drm_mm_pre_get(&pfb->tag_heap);
- if (ret)
- return NULL;
-
- spin_lock(&dev_priv->tile.lock);
- mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
- if (mem)
- mem = drm_mm_get_block_atomic(mem, size, 0);
- spin_unlock(&dev_priv->tile.lock);
-
- return mem;
-}
-
-static void
-nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node **pmem)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_mm_node *mem = *pmem;
- if (mem) {
- spin_lock(&dev_priv->tile.lock);
- drm_mm_put_block(mem);
- spin_unlock(&dev_priv->tile.lock);
- *pmem = NULL;
- }
-}
-
-void
-nv20_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch, uint32_t flags)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
- int bpp = (flags & NOUVEAU_GEM_TILE_32BPP ? 32 : 16);
-
- tile->addr = 0x00000001 | addr;
- tile->limit = max(1u, addr + size) - 1;
- tile->pitch = pitch;
-
- /* Allocate some of the on-die tag memory, used to store Z
- * compression meta-data (most likely just a bitmap determining
- * if a given tile is compressed or not).
- */
- if (flags & NOUVEAU_GEM_TILE_ZETA) {
- tile->tag_mem = nv20_fb_alloc_tag(dev, size / 256);
- if (tile->tag_mem) {
- /* Enable Z compression */
- tile->zcomp = tile->tag_mem->start;
- if (dev_priv->chipset >= 0x25) {
- if (bpp == 16)
- tile->zcomp |= NV25_PFB_ZCOMP_MODE_16;
- else
- tile->zcomp |= NV25_PFB_ZCOMP_MODE_32;
- } else {
- tile->zcomp |= NV20_PFB_ZCOMP_EN;
- if (bpp != 16)
- tile->zcomp |= NV20_PFB_ZCOMP_MODE_32;
- }
- }
-
- tile->addr |= 2;
- }
-}
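
The tag allocation above asks for size / 256 units of the on-die tag heap, so compressed regions are cheap but not free; with a hypothetical 8 MiB Z buffer that is 32768 units, half of the 0-65536 range nv20_fb_init() below hands to the NV25+ tag heap.

/* Standalone arithmetic for the zcomp tag sizing above (8MiB is made up). */
#include <stdio.h>

int main(void)
{
	unsigned size = 8 * 1024 * 1024;	/* hypothetical Z buffer size    */
	unsigned tags = size / 256;		/* units requested from tag_heap */

	printf("%u KiB region -> %u tag-heap units\n", size >> 10, tags);
	return 0;
}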
-
-void
-nv20_fb_free_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- tile->addr = tile->limit = tile->pitch = tile->zcomp = 0;
- nv20_fb_free_tag(dev, &tile->tag_mem);
-}
-
-void
-nv20_fb_set_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
- nv_wr32(dev, NV20_PFB_ZCOMP(i), tile->zcomp);
-}
-
-int
-nv20_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 mem_size = nv_rd32(dev, 0x10020c);
- u32 pbus1218 = nv_rd32(dev, 0x001218);
-
- dev_priv->vram_size = mem_size & 0xff000000;
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_GDDR2; break;
- }
-
- return 0;
-}
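
The size/type decode above is a straight bit-slice of two registers; run standalone on made-up readbacks it looks like this.

/* Standalone demo of the NV20 VRAM decode above, on made-up readbacks. */
#include <stdio.h>

int main(void)
{
	unsigned mem_size = 0x04000aa1;	/* hypothetical 0x10020c value */
	unsigned pbus1218 = 0x00000100;	/* hypothetical 0x001218 value */
	const char *type = "unknown";

	switch (pbus1218 & 0x00000300) {
	case 0x00000000: type = "SDRAM"; break;
	case 0x00000100: type = "DDR1";  break;
	case 0x00000200: type = "GDDR3"; break;
	case 0x00000300: type = "GDDR2"; break;
	}

	printf("%u MiB of %s\n", (mem_size & 0xff000000) >> 20, type);
	return 0;
}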
-
-int
-nv20_fb_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- int i;
-
- if (dev_priv->chipset >= 0x25)
- drm_mm_init(&pfb->tag_heap, 0, 64 * 1024);
- else
- drm_mm_init(&pfb->tag_heap, 0, 32 * 1024);
-
- /* Turn all the tiling regions off. */
- pfb->num_tiles = NV10_PFB_TILE__SIZE;
- for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_tile_region(dev, i);
-
- return 0;
-}
-
-void
-nv20_fb_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- int i;
-
- for (i = 0; i < pfb->num_tiles; i++)
- pfb->free_tile_region(dev, i);
-
- drm_mm_takedown(&pfb->tag_heap);
-}
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-/*
- * NV20
- * -----
- * There are 3 families :
- * NV20 is 0x10de:0x020*
- * NV25/28 is 0x10de:0x025* / 0x10de:0x028*
- * NV2A is 0x10de:0x02A0
- *
- * NV30
- * -----
- * There are 3 families :
- * NV30/31 is 0x10de:0x030* / 0x10de:0x031*
- * NV34 is 0x10de:0x032*
- * NV35/36 is 0x10de:0x033* / 0x10de:0x034*
- *
- * Not seen in the wild, no dumps (probably NV35) :
- * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
- * NV38 is 0x10de:0x0333, 0x10de:0x00fe
- *
- */
-
-struct nv20_graph_engine {
- struct nouveau_exec_engine base;
- struct nouveau_gpuobj *ctxtab;
- void (*grctx_init)(struct nouveau_gpuobj *);
- u32 grctx_size;
- u32 grctx_user;
-};
-
-#define NV20_GRCTX_SIZE (3580*4)
-#define NV25_GRCTX_SIZE (3529*4)
-#define NV2A_GRCTX_SIZE (3500*4)
-
-#define NV30_31_GRCTX_SIZE (24392)
-#define NV34_GRCTX_SIZE (18140)
-#define NV35_36_GRCTX_SIZE (22396)
-
-int
-nv20_graph_unload_context(struct drm_device *dev)
-{
- struct nouveau_channel *chan;
- struct nouveau_gpuobj *grctx;
- u32 tmp;
-
- chan = nv10_graph_channel(dev);
- if (!chan)
- return 0;
- grctx = chan->engctx[NVOBJ_ENGINE_GR];
-
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_POINTER, grctx->pinst >> 4);
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_XFER,
- NV20_PGRAPH_CHANNEL_CTX_XFER_SAVE);
-
- nouveau_wait_for_idle(dev);
-
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000000);
- tmp = nv_rd32(dev, NV10_PGRAPH_CTX_USER) & 0x00ffffff;
- tmp |= 31 << 24;
- nv_wr32(dev, NV10_PGRAPH_CTX_USER, tmp);
- return 0;
-}
-
-static void
-nv20_graph_rdi(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, writecount = 32;
- uint32_t rdi_index = 0x2c80000;
-
- if (dev_priv->chipset == 0x20) {
- rdi_index = 0x3d0000;
- writecount = 15;
- }
-
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, rdi_index);
- for (i = 0; i < writecount; i++)
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, 0);
-
- nouveau_wait_for_idle(dev);
-}
-
-static void
-nv20_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x033c, 0xffff0000);
- nv_wo32(ctx, 0x03a0, 0x0fff0000);
- nv_wo32(ctx, 0x03a4, 0x0fff0000);
- nv_wo32(ctx, 0x047c, 0x00000101);
- nv_wo32(ctx, 0x0490, 0x00000111);
- nv_wo32(ctx, 0x04a8, 0x44400000);
- for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(ctx, i, 0x00080000);
- for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(ctx, i, 0x000105b8);
- for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x05a4, 0x4b7fffff);
- nv_wo32(ctx, 0x05fc, 0x00000001);
- nv_wo32(ctx, 0x0604, 0x00004000);
- nv_wo32(ctx, 0x0610, 0x00000001);
- nv_wo32(ctx, 0x0618, 0x00040000);
- nv_wo32(ctx, 0x061c, 0x00010000);
- for (i = 0x1c1c; i <= 0x248c; i += 16) {
- nv_wo32(ctx, (i + 0), 0x10700ff9);
- nv_wo32(ctx, (i + 4), 0x0436086c);
- nv_wo32(ctx, (i + 8), 0x000c001b);
- }
- nv_wo32(ctx, 0x281c, 0x3f800000);
- nv_wo32(ctx, 0x2830, 0x3f800000);
- nv_wo32(ctx, 0x285c, 0x40000000);
- nv_wo32(ctx, 0x2860, 0x3f800000);
- nv_wo32(ctx, 0x2864, 0x3f000000);
- nv_wo32(ctx, 0x286c, 0x40000000);
- nv_wo32(ctx, 0x2870, 0x3f800000);
- nv_wo32(ctx, 0x2878, 0xbf800000);
- nv_wo32(ctx, 0x2880, 0xbf800000);
- nv_wo32(ctx, 0x34a4, 0x000fe000);
- nv_wo32(ctx, 0x3530, 0x000003f8);
- nv_wo32(ctx, 0x3540, 0x002fe000);
- for (i = 0x355c; i <= 0x3578; i += 4)
- nv_wo32(ctx, i, 0x001c527c);
-}
-
-static void
-nv25_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x035c, 0xffff0000);
- nv_wo32(ctx, 0x03c0, 0x0fff0000);
- nv_wo32(ctx, 0x03c4, 0x0fff0000);
- nv_wo32(ctx, 0x049c, 0x00000101);
- nv_wo32(ctx, 0x04b0, 0x00000111);
- nv_wo32(ctx, 0x04c8, 0x00000080);
- nv_wo32(ctx, 0x04cc, 0xffff0000);
- nv_wo32(ctx, 0x04d0, 0x00000001);
- nv_wo32(ctx, 0x04e4, 0x44400000);
- nv_wo32(ctx, 0x04fc, 0x4b800000);
- for (i = 0x0510; i <= 0x051c; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x0530; i <= 0x053c; i += 4)
- nv_wo32(ctx, i, 0x00080000);
- for (i = 0x0548; i <= 0x0554; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x0558; i <= 0x0564; i += 4)
- nv_wo32(ctx, i, 0x000105b8);
- for (i = 0x0568; i <= 0x0574; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- for (i = 0x0598; i <= 0x05d4; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x05e0, 0x4b7fffff);
- nv_wo32(ctx, 0x0620, 0x00000080);
- nv_wo32(ctx, 0x0624, 0x30201000);
- nv_wo32(ctx, 0x0628, 0x70605040);
- nv_wo32(ctx, 0x062c, 0xb0a09080);
- nv_wo32(ctx, 0x0630, 0xf0e0d0c0);
- nv_wo32(ctx, 0x0664, 0x00000001);
- nv_wo32(ctx, 0x066c, 0x00004000);
- nv_wo32(ctx, 0x0678, 0x00000001);
- nv_wo32(ctx, 0x0680, 0x00040000);
- nv_wo32(ctx, 0x0684, 0x00010000);
- for (i = 0x1b04; i <= 0x2374; i += 16) {
- nv_wo32(ctx, (i + 0), 0x10700ff9);
- nv_wo32(ctx, (i + 4), 0x0436086c);
- nv_wo32(ctx, (i + 8), 0x000c001b);
- }
- nv_wo32(ctx, 0x2704, 0x3f800000);
- nv_wo32(ctx, 0x2718, 0x3f800000);
- nv_wo32(ctx, 0x2744, 0x40000000);
- nv_wo32(ctx, 0x2748, 0x3f800000);
- nv_wo32(ctx, 0x274c, 0x3f000000);
- nv_wo32(ctx, 0x2754, 0x40000000);
- nv_wo32(ctx, 0x2758, 0x3f800000);
- nv_wo32(ctx, 0x2760, 0xbf800000);
- nv_wo32(ctx, 0x2768, 0xbf800000);
- nv_wo32(ctx, 0x308c, 0x000fe000);
- nv_wo32(ctx, 0x3108, 0x000003f8);
- nv_wo32(ctx, 0x3468, 0x002fe000);
- for (i = 0x3484; i <= 0x34a0; i += 4)
- nv_wo32(ctx, i, 0x001c527c);
-}
-
-static void
-nv2a_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x033c, 0xffff0000);
- nv_wo32(ctx, 0x03a0, 0x0fff0000);
- nv_wo32(ctx, 0x03a4, 0x0fff0000);
- nv_wo32(ctx, 0x047c, 0x00000101);
- nv_wo32(ctx, 0x0490, 0x00000111);
- nv_wo32(ctx, 0x04a8, 0x44400000);
- for (i = 0x04d4; i <= 0x04e0; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x04f4; i <= 0x0500; i += 4)
- nv_wo32(ctx, i, 0x00080000);
- for (i = 0x050c; i <= 0x0518; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x051c; i <= 0x0528; i += 4)
- nv_wo32(ctx, i, 0x000105b8);
- for (i = 0x052c; i <= 0x0538; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- for (i = 0x055c; i <= 0x0598; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x05a4, 0x4b7fffff);
- nv_wo32(ctx, 0x05fc, 0x00000001);
- nv_wo32(ctx, 0x0604, 0x00004000);
- nv_wo32(ctx, 0x0610, 0x00000001);
- nv_wo32(ctx, 0x0618, 0x00040000);
- nv_wo32(ctx, 0x061c, 0x00010000);
- for (i = 0x1a9c; i <= 0x22fc; i += 16) { /*XXX: check!! */
- nv_wo32(ctx, (i + 0), 0x10700ff9);
- nv_wo32(ctx, (i + 4), 0x0436086c);
- nv_wo32(ctx, (i + 8), 0x000c001b);
- }
- nv_wo32(ctx, 0x269c, 0x3f800000);
- nv_wo32(ctx, 0x26b0, 0x3f800000);
- nv_wo32(ctx, 0x26dc, 0x40000000);
- nv_wo32(ctx, 0x26e0, 0x3f800000);
- nv_wo32(ctx, 0x26e4, 0x3f000000);
- nv_wo32(ctx, 0x26ec, 0x40000000);
- nv_wo32(ctx, 0x26f0, 0x3f800000);
- nv_wo32(ctx, 0x26f8, 0xbf800000);
- nv_wo32(ctx, 0x2700, 0xbf800000);
- nv_wo32(ctx, 0x3024, 0x000fe000);
- nv_wo32(ctx, 0x30a0, 0x000003f8);
- nv_wo32(ctx, 0x33fc, 0x002fe000);
- for (i = 0x341c; i <= 0x3438; i += 4)
- nv_wo32(ctx, i, 0x001c527c);
-}
-
-static void
-nv30_31_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x0410, 0x00000101);
- nv_wo32(ctx, 0x0424, 0x00000111);
- nv_wo32(ctx, 0x0428, 0x00000060);
- nv_wo32(ctx, 0x0444, 0x00000080);
- nv_wo32(ctx, 0x0448, 0xffff0000);
- nv_wo32(ctx, 0x044c, 0x00000001);
- nv_wo32(ctx, 0x0460, 0x44400000);
- nv_wo32(ctx, 0x048c, 0xffff0000);
- for (i = 0x04e0; i < 0x04e8; i += 4)
- nv_wo32(ctx, i, 0x0fff0000);
- nv_wo32(ctx, 0x04ec, 0x00011100);
- for (i = 0x0508; i < 0x0548; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x0550, 0x4b7fffff);
- nv_wo32(ctx, 0x058c, 0x00000080);
- nv_wo32(ctx, 0x0590, 0x30201000);
- nv_wo32(ctx, 0x0594, 0x70605040);
- nv_wo32(ctx, 0x0598, 0xb8a89888);
- nv_wo32(ctx, 0x059c, 0xf8e8d8c8);
- nv_wo32(ctx, 0x05b0, 0xb0000000);
- for (i = 0x0600; i < 0x0640; i += 4)
- nv_wo32(ctx, i, 0x00010588);
- for (i = 0x0640; i < 0x0680; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x06c0; i < 0x0700; i += 4)
- nv_wo32(ctx, i, 0x0008aae4);
- for (i = 0x0700; i < 0x0740; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x0740; i < 0x0780; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- nv_wo32(ctx, 0x085c, 0x00040000);
- nv_wo32(ctx, 0x0860, 0x00010000);
- for (i = 0x0864; i < 0x0874; i += 4)
- nv_wo32(ctx, i, 0x00040004);
- for (i = 0x1f18; i <= 0x3088 ; i += 16) {
- nv_wo32(ctx, i + 0, 0x10700ff9);
- nv_wo32(ctx, i + 4, 0x0436086c);
- nv_wo32(ctx, i + 8, 0x000c001b);
- }
- for (i = 0x30b8; i < 0x30c8; i += 4)
- nv_wo32(ctx, i, 0x0000ffff);
- nv_wo32(ctx, 0x344c, 0x3f800000);
- nv_wo32(ctx, 0x3808, 0x3f800000);
- nv_wo32(ctx, 0x381c, 0x3f800000);
- nv_wo32(ctx, 0x3848, 0x40000000);
- nv_wo32(ctx, 0x384c, 0x3f800000);
- nv_wo32(ctx, 0x3850, 0x3f000000);
- nv_wo32(ctx, 0x3858, 0x40000000);
- nv_wo32(ctx, 0x385c, 0x3f800000);
- nv_wo32(ctx, 0x3864, 0xbf800000);
- nv_wo32(ctx, 0x386c, 0xbf800000);
-}
-
-static void
-nv34_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x040c, 0x01000101);
- nv_wo32(ctx, 0x0420, 0x00000111);
- nv_wo32(ctx, 0x0424, 0x00000060);
- nv_wo32(ctx, 0x0440, 0x00000080);
- nv_wo32(ctx, 0x0444, 0xffff0000);
- nv_wo32(ctx, 0x0448, 0x00000001);
- nv_wo32(ctx, 0x045c, 0x44400000);
- nv_wo32(ctx, 0x0480, 0xffff0000);
- for (i = 0x04d4; i < 0x04dc; i += 4)
- nv_wo32(ctx, i, 0x0fff0000);
- nv_wo32(ctx, 0x04e0, 0x00011100);
- for (i = 0x04fc; i < 0x053c; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x0544, 0x4b7fffff);
- nv_wo32(ctx, 0x057c, 0x00000080);
- nv_wo32(ctx, 0x0580, 0x30201000);
- nv_wo32(ctx, 0x0584, 0x70605040);
- nv_wo32(ctx, 0x0588, 0xb8a89888);
- nv_wo32(ctx, 0x058c, 0xf8e8d8c8);
- nv_wo32(ctx, 0x05a0, 0xb0000000);
- for (i = 0x05f0; i < 0x0630; i += 4)
- nv_wo32(ctx, i, 0x00010588);
- for (i = 0x0630; i < 0x0670; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x06b0; i < 0x06f0; i += 4)
- nv_wo32(ctx, i, 0x0008aae4);
- for (i = 0x06f0; i < 0x0730; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x0730; i < 0x0770; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- nv_wo32(ctx, 0x0850, 0x00040000);
- nv_wo32(ctx, 0x0854, 0x00010000);
- for (i = 0x0858; i < 0x0868; i += 4)
- nv_wo32(ctx, i, 0x00040004);
- for (i = 0x15ac; i <= 0x271c ; i += 16) {
- nv_wo32(ctx, i + 0, 0x10700ff9);
- nv_wo32(ctx, i + 4, 0x0436086c);
- nv_wo32(ctx, i + 8, 0x000c001b);
- }
- for (i = 0x274c; i < 0x275c; i += 4)
- nv_wo32(ctx, i, 0x0000ffff);
- nv_wo32(ctx, 0x2ae0, 0x3f800000);
- nv_wo32(ctx, 0x2e9c, 0x3f800000);
- nv_wo32(ctx, 0x2eb0, 0x3f800000);
- nv_wo32(ctx, 0x2edc, 0x40000000);
- nv_wo32(ctx, 0x2ee0, 0x3f800000);
- nv_wo32(ctx, 0x2ee4, 0x3f000000);
- nv_wo32(ctx, 0x2eec, 0x40000000);
- nv_wo32(ctx, 0x2ef0, 0x3f800000);
- nv_wo32(ctx, 0x2ef8, 0xbf800000);
- nv_wo32(ctx, 0x2f00, 0xbf800000);
-}
-
-static void
-nv35_36_graph_context_init(struct nouveau_gpuobj *ctx)
-{
- int i;
-
- nv_wo32(ctx, 0x040c, 0x00000101);
- nv_wo32(ctx, 0x0420, 0x00000111);
- nv_wo32(ctx, 0x0424, 0x00000060);
- nv_wo32(ctx, 0x0440, 0x00000080);
- nv_wo32(ctx, 0x0444, 0xffff0000);
- nv_wo32(ctx, 0x0448, 0x00000001);
- nv_wo32(ctx, 0x045c, 0x44400000);
- nv_wo32(ctx, 0x0488, 0xffff0000);
- for (i = 0x04dc; i < 0x04e4; i += 4)
- nv_wo32(ctx, i, 0x0fff0000);
- nv_wo32(ctx, 0x04e8, 0x00011100);
- for (i = 0x0504; i < 0x0544; i += 4)
- nv_wo32(ctx, i, 0x07ff0000);
- nv_wo32(ctx, 0x054c, 0x4b7fffff);
- nv_wo32(ctx, 0x0588, 0x00000080);
- nv_wo32(ctx, 0x058c, 0x30201000);
- nv_wo32(ctx, 0x0590, 0x70605040);
- nv_wo32(ctx, 0x0594, 0xb8a89888);
- nv_wo32(ctx, 0x0598, 0xf8e8d8c8);
- nv_wo32(ctx, 0x05ac, 0xb0000000);
- for (i = 0x0604; i < 0x0644; i += 4)
- nv_wo32(ctx, i, 0x00010588);
- for (i = 0x0644; i < 0x0684; i += 4)
- nv_wo32(ctx, i, 0x00030303);
- for (i = 0x06c4; i < 0x0704; i += 4)
- nv_wo32(ctx, i, 0x0008aae4);
- for (i = 0x0704; i < 0x0744; i += 4)
- nv_wo32(ctx, i, 0x01012000);
- for (i = 0x0744; i < 0x0784; i += 4)
- nv_wo32(ctx, i, 0x00080008);
- nv_wo32(ctx, 0x0860, 0x00040000);
- nv_wo32(ctx, 0x0864, 0x00010000);
- for (i = 0x0868; i < 0x0878; i += 4)
- nv_wo32(ctx, i, 0x00040004);
- for (i = 0x1f1c; i <= 0x308c ; i += 16) {
- nv_wo32(ctx, i + 0, 0x10700ff9);
- nv_wo32(ctx, i + 4, 0x0436086c);
- nv_wo32(ctx, i + 8, 0x000c001b);
- }
- for (i = 0x30bc; i < 0x30cc; i += 4)
- nv_wo32(ctx, i, 0x0000ffff);
- nv_wo32(ctx, 0x3450, 0x3f800000);
- nv_wo32(ctx, 0x380c, 0x3f800000);
- nv_wo32(ctx, 0x3820, 0x3f800000);
- nv_wo32(ctx, 0x384c, 0x40000000);
- nv_wo32(ctx, 0x3850, 0x3f800000);
- nv_wo32(ctx, 0x3854, 0x3f000000);
- nv_wo32(ctx, 0x385c, 0x40000000);
- nv_wo32(ctx, 0x3860, 0x3f800000);
- nv_wo32(ctx, 0x3868, 0xbf800000);
- nv_wo32(ctx, 0x3870, 0xbf800000);
-}
-
-int
-nv20_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
- struct nouveau_gpuobj *grctx = NULL;
- struct drm_device *dev = chan->dev;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &grctx);
- if (ret)
- return ret;
-
- /* Initialise default context values */
- pgraph->grctx_init(grctx);
-
- /* nv20: nv_wo32(dev, chan->ramin_grctx->gpuobj, 10, chan->id<<24); */
- /* CTX_USER */
- nv_wo32(grctx, pgraph->grctx_user, (chan->id << 24) | 0x1);
-
- nv_wo32(pgraph->ctxtab, chan->id * 4, grctx->pinst >> 4);
- chan->engctx[engine] = grctx;
- return 0;
-}
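
To make the two writes above concrete, here is a worked example with made-up values (channel 5, a context at PRAMIN offset 0x00012000):

    /* Hypothetical values for illustration:
     *
     *   CTX_USER dword (byte offset grctx_user within the context):
     *       (5 << 24) | 0x1  = 0x05000001
     *   context pointer table entry (byte offset 5 * 4 in ctxtab):
     *       0x00012000 >> 4  = 0x00001200
     */
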
-
-void
-nv20_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv20_graph_engine *pgraph = nv_engine(chan->dev, engine);
- struct nouveau_gpuobj *grctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
-
- /* Unload the context if it's the currently active one */
- if (nv10_graph_channel(dev) == chan)
- nv20_graph_unload_context(dev);
-
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the context resources */
- nv_wo32(pgraph->ctxtab, chan->id * 4, 0);
-
- nouveau_gpuobj_ref(NULL, &grctx);
- chan->engctx[engine] = NULL;
-}
-
-static void
-nv20_graph_set_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
-
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->limit);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->pitch);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->addr);
-
- if (dev_priv->card_type == NV_20) {
- nv_wr32(dev, NV20_PGRAPH_ZCOMP(i), tile->zcomp);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00ea0090 + 4 * i);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, tile->zcomp);
- }
-}
-
-int
-nv20_graph_init(struct drm_device *dev, int engine)
-{
- struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t tmp, vramsz;
- int i;
-
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
-
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
-
- nv20_graph_rdi(dev);
-
- nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x00118700);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
- nv_wr32(dev, 0x40009C , 0x00000040);
-
- if (dev_priv->chipset >= 0x25) {
- nv_wr32(dev, 0x400890, 0x00a8cfff);
- nv_wr32(dev, 0x400610, 0x304B1FB6);
- nv_wr32(dev, 0x400B80, 0x1cbd3883);
- nv_wr32(dev, 0x400B84, 0x44000000);
- nv_wr32(dev, 0x400098, 0x40000080);
- nv_wr32(dev, 0x400B88, 0x000000ff);
-
- } else {
- nv_wr32(dev, 0x400880, 0x0008c7df);
- nv_wr32(dev, 0x400094, 0x00000005);
- nv_wr32(dev, 0x400B80, 0x45eae20e);
- nv_wr32(dev, 0x400B84, 0x24000000);
- nv_wr32(dev, 0x400098, 0x00000040);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00038);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E10038);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
- }
-
- /* Turn all the tiling regions off. */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_tile_region(dev, i);
-
- nv_wr32(dev, 0x4009a0, nv_rd32(dev, 0x100324));
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA000C);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA, nv_rd32(dev, 0x100324));
-
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
- nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
-
- tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) & 0x0007ff00;
- nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
- tmp = nv_rd32(dev, NV10_PGRAPH_SURFACE) | 0x00020100;
- nv_wr32(dev, NV10_PGRAPH_SURFACE, tmp);
-
- /* begin RAM config */
- vramsz = pci_resource_len(dev->pdev, 0) - 1;
- nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, 0x400820, 0);
- nv_wr32(dev, 0x400824, 0);
- nv_wr32(dev, 0x400864, vramsz - 1);
- nv_wr32(dev, 0x400868, vramsz - 1);
-
- /* Interesting: the writes below overwrite some of the tile setup above. */
- nv_wr32(dev, 0x400B20, 0x00000000);
- nv_wr32(dev, 0x400B04, 0xFFFFFFFF);
-
- nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMIN, 0);
- nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
- nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
- nv_wr32(dev, NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
-
- return 0;
-}
-
-int
-nv30_graph_init(struct drm_device *dev, int engine)
-{
- struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE,
- nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PGRAPH);
-
- nv_wr32(dev, NV20_PGRAPH_CHANNEL_CTX_TABLE, pgraph->ctxtab->pinst >> 4);
-
- nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
- nv_wr32(dev, 0x400890, 0x01b463ff);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xf2de0475);
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
- nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
- nv_wr32(dev, 0x400B80, 0x1003d888);
- nv_wr32(dev, 0x400B84, 0x0c000000);
- nv_wr32(dev, 0x400098, 0x00000000);
- nv_wr32(dev, 0x40009C, 0x0005ad00);
- nv_wr32(dev, 0x400B88, 0x62ff00ff); /* suspiciously like PGRAPH_DEBUG_2 */
- nv_wr32(dev, 0x4000a0, 0x00000000);
- nv_wr32(dev, 0x4000a4, 0x00000008);
- nv_wr32(dev, 0x4008a8, 0xb784a400);
- nv_wr32(dev, 0x400ba0, 0x002f8685);
- nv_wr32(dev, 0x400ba4, 0x00231f3f);
- nv_wr32(dev, 0x4008a4, 0x40000020);
-
- if (dev_priv->chipset == 0x34) {
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0004);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00200201);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0008);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000008);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0000);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000032);
- nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00E00004);
- nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000002);
- }
-
- nv_wr32(dev, 0x4000c0, 0x00000016);
-
- /* Turn all the tiling regions off. */
- for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
- nv20_graph_set_tile_region(dev, i);
-
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
- nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
- nv_wr32(dev, 0x0040075c , 0x00000001);
-
- /* begin RAM config */
- /* vramsz = pci_resource_len(dev->pdev, 0) - 1; */
- nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
- if (dev_priv->chipset != 0x34) {
- nv_wr32(dev, 0x400750, 0x00EA0000);
- nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x400750, 0x00EA0004);
- nv_wr32(dev, 0x400754, nv_rd32(dev, NV04_PFB_CFG1));
- }
-
- return 0;
-}
-
-int
-nv20_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000000);
- if (!nv_wait(dev, NV04_PGRAPH_STATUS, ~0, 0) && suspend) {
- nv_mask(dev, NV04_PGRAPH_FIFO, 0x00000001, 0x00000001);
- return -EBUSY;
- }
- nv20_graph_unload_context(dev);
- nv_wr32(dev, NV03_PGRAPH_INTR_EN, 0x00000000);
- return 0;
-}
-
-static void
-nv20_graph_isr(struct drm_device *dev)
-{
- u32 stat;
-
- while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 chid = (addr & 0x01f00000) >> 20;
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xfff;
- u32 show = stat;
-
- if (stat & NV_PGRAPH_INTR_ERROR) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
- show &= ~NV_PGRAPH_INTR_ERROR;
- }
- }
-
- nv_wr32(dev, NV03_PGRAPH_INTR, stat);
- nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv10_graph_intr, show);
- printk(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
- nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d/%d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, subc, class, mthd, data);
- }
- }
-}
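
The field extraction at the top of the handler splits NV04_PGRAPH_TRAPPED_ADDR into channel, subchannel and method; a worked example with an invented register value:

    /* e.g. TRAPPED_ADDR = 0x00520910:
     *   chid = (0x00520910 & 0x01f00000) >> 20 = 5
     *   subc = (0x00520910 & 0x00070000) >> 16 = 2
     *   mthd =  0x00520910 & 0x00001ffc        = 0x0910
     */
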
-
-static void
-nv20_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nv20_graph_engine *pgraph = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 12);
- nouveau_gpuobj_ref(NULL, &pgraph->ctxtab);
-
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(pgraph);
-}
-
-int
-nv20_graph_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv20_graph_engine *pgraph;
- int ret;
-
- pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- pgraph->base.destroy = nv20_graph_destroy;
- pgraph->base.fini = nv20_graph_fini;
- pgraph->base.context_new = nv20_graph_context_new;
- pgraph->base.context_del = nv20_graph_context_del;
- pgraph->base.object_new = nv04_graph_object_new;
- pgraph->base.set_tile_region = nv20_graph_set_tile_region;
-
- pgraph->grctx_user = 0x0028;
- if (dev_priv->card_type == NV_20) {
- pgraph->base.init = nv20_graph_init;
- switch (dev_priv->chipset) {
- case 0x20:
- pgraph->grctx_init = nv20_graph_context_init;
- pgraph->grctx_size = NV20_GRCTX_SIZE;
- pgraph->grctx_user = 0x0000;
- break;
- case 0x25:
- case 0x28:
- pgraph->grctx_init = nv25_graph_context_init;
- pgraph->grctx_size = NV25_GRCTX_SIZE;
- break;
- case 0x2a:
- pgraph->grctx_init = nv2a_graph_context_init;
- pgraph->grctx_size = NV2A_GRCTX_SIZE;
- pgraph->grctx_user = 0x0000;
- break;
- default:
- NV_ERROR(dev, "PGRAPH: unknown chipset\n");
- kfree(pgraph);
- return 0;
- }
- } else {
- pgraph->base.init = nv30_graph_init;
- switch (dev_priv->chipset) {
- case 0x30:
- case 0x31:
- pgraph->grctx_init = nv30_31_graph_context_init;
- pgraph->grctx_size = NV30_31_GRCTX_SIZE;
- break;
- case 0x34:
- pgraph->grctx_init = nv34_graph_context_init;
- pgraph->grctx_size = NV34_GRCTX_SIZE;
- break;
- case 0x35:
- case 0x36:
- pgraph->grctx_init = nv35_36_graph_context_init;
- pgraph->grctx_size = NV35_36_GRCTX_SIZE;
- break;
- default:
- NV_ERROR(dev, "PGRAPH: unknown chipset\n");
- kfree(pgraph);
- return 0;
- }
- }
-
- /* Create Context Pointer Table */
- ret = nouveau_gpuobj_new(dev, NULL, 32 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC,
- &pgraph->ctxtab);
- if (ret) {
- kfree(pgraph);
- return ret;
- }
-
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- nouveau_irq_register(dev, 12, nv20_graph_isr);
-
- NVOBJ_CLASS(dev, 0x0030, GR); /* null */
- NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
- NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
- NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
- NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
- NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
- NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
- NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
- NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
- NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
- NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
- NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
- if (dev_priv->card_type == NV_20) {
- NVOBJ_CLASS(dev, 0x009e, GR); /* swzsurf */
- NVOBJ_CLASS(dev, 0x0096, GR); /* celcius */
-
- /* kelvin */
- if (dev_priv->chipset < 0x25)
- NVOBJ_CLASS(dev, 0x0097, GR);
- else
- NVOBJ_CLASS(dev, 0x0597, GR);
- } else {
- NVOBJ_CLASS(dev, 0x038a, GR); /* ifc (nv30) */
- NVOBJ_CLASS(dev, 0x0389, GR); /* sifm (nv30) */
- NVOBJ_CLASS(dev, 0x0362, GR); /* surf2d (nv30) */
- NVOBJ_CLASS(dev, 0x039e, GR); /* swzsurf */
-
- /* rankine */
- if (0x00000003 & (1 << (dev_priv->chipset & 0x0f)))
- NVOBJ_CLASS(dev, 0x0397, GR);
- else
- if (0x00000010 & (1 << (dev_priv->chipset & 0x0f)))
- NVOBJ_CLASS(dev, 0x0697, GR);
- else
- if (0x000001e0 & (1 << (dev_priv->chipset & 0x0f)))
- NVOBJ_CLASS(dev, 0x0497, GR);
- }
-
- return 0;
-}
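
The per-chipset masks in the rankine class selection above are just bit sets indexed by the low nibble of the chipset id; spelled out:

    /* 0x00000003 & (1 << (chipset & 0x0f))  -> chipsets 0x30, 0x31             -> class 0x0397
     * 0x00000010 & (1 << (chipset & 0x0f))  -> chipset  0x34                   -> class 0x0697
     * 0x000001e0 & (1 << (chipset & 0x0f))  -> chipsets 0x35, 0x36, 0x37, 0x38 -> class 0x0497
     *
     * e.g. chipset 0x36: 1 << 6 = 0x40, and 0x1e0 & 0x40 != 0, so 0x0497.
     */
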
+++ /dev/null
-/*
- * Copyright (C) 2010 Francisco Jerez.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-void
-nv30_fb_init_tile_region(struct drm_device *dev, int i, uint32_t addr,
- uint32_t size, uint32_t pitch, uint32_t flags)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- tile->addr = addr | 1;
- tile->limit = max(1u, addr + size) - 1;
- tile->pitch = pitch;
-}
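
As a worked example of the tile bookkeeping above (addresses invented; bit 0 of the address word is presumably the enable flag):

    /* A tile starting at 0x00100000, 512KiB long, pitch 0x2000:
     *   tile->addr  = 0x00100000 | 1   = 0x00100001
     *   tile->limit = 0x00180000 - 1   = 0x0017ffff
     *   tile->pitch = 0x00002000
     * The max(1u, ...) only matters for the degenerate addr == size == 0
     * case, where it keeps the limit from wrapping below zero.
     */
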
-
-void
-nv30_fb_free_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- tile->addr = tile->limit = tile->pitch = 0;
-}
-
-static int
-calc_bias(struct drm_device *dev, int k, int i, int j)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int b = (dev_priv->chipset > 0x30 ?
- nv_rd32(dev, 0x122c + 0x10 * k + 0x4 * j) >> (4 * (i ^ 1)) :
- 0) & 0xf;
-
- return 2 * (b & 0x8 ? b - 0x10 : b);
-}
-
-static int
-calc_ref(struct drm_device *dev, int l, int k, int i)
-{
- int j, x = 0;
-
- for (j = 0; j < 4; j++) {
- int m = (l >> (8 * i) & 0xff) + calc_bias(dev, k, i, j);
-
- x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
- }
-
- return x;
-}
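
The two helpers above build each 0x10037c/0x1003ac memory-timing word from one byte of register 0x1003d0 plus a signed 4-bit bias (read from the 0x122c block on chipsets newer than 0x30). A self-contained sketch of the same packing, with the register reads replaced by plain parameters (function and macro names here are illustrative, not part of the driver):

    #include <stdint.h>

    #define clamp(v, lo, hi) ((v) < (lo) ? (lo) : (v) > (hi) ? (hi) : (v))

    /* Sign-extend a 4-bit bias nibble and double it, as calc_bias() does. */
    static int bias_from_nibble(int b)
    {
            return 2 * (b & 0x8 ? b - 0x10 : b);
    }

    /* Pack four copies of (byte i of l) + bias, clamped to [0, 0x1f] and
     * tagged with 0x80, into one 32-bit word -- what calc_ref() produces
     * when the per-lane biases happen to be equal. */
    static uint32_t ref_word(uint32_t l, int i, int bias)
    {
            uint32_t x = 0;
            int j;

            for (j = 0; j < 4; j++) {
                    int m = ((l >> (8 * i)) & 0xff) + bias;
                    x |= (0x80 | clamp(m, 0, 0x1f)) << (8 * j);
            }
            return x;
    }

    /* e.g. ref_word(0x00001510, 0, bias_from_nibble(0xf)) == 0x8e8e8e8e
     * (byte 0x10, bias -2 -> 0x0e, tagged with 0x80 in all four lanes). */
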
-
-int
-nv30_fb_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- int i, j;
-
- pfb->num_tiles = NV10_PFB_TILE__SIZE;
-
- /* Turn all the tiling regions off. */
- for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_tile_region(dev, i);
-
- /* Init the memory timing regs at 0x10037c/0x1003ac */
- if (dev_priv->chipset == 0x30 ||
- dev_priv->chipset == 0x31 ||
- dev_priv->chipset == 0x35) {
- /* Related to ROP count */
- int n = (dev_priv->chipset == 0x31 ? 2 : 4);
- int l = nv_rd32(dev, 0x1003d0);
-
- for (i = 0; i < n; i++) {
- for (j = 0; j < 3; j++)
- nv_wr32(dev, 0x10037c + 0xc * i + 0x4 * j,
- calc_ref(dev, l, 0, j));
-
- for (j = 0; j < 2; j++)
- nv_wr32(dev, 0x1003ac + 0x8 * i + 0x4 * j,
- calc_ref(dev, l, 1, j));
- }
- }
-
- return 0;
-}
-
-void
-nv30_fb_takedown(struct drm_device *dev)
-{
-}
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-
-struct nv31_mpeg_engine {
- struct nouveau_exec_engine base;
- atomic_t refcount;
-};
-
-
-static int
-nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
-
- if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
- return -EBUSY;
-
- chan->engctx[engine] = (void *)0xdeadcafe;
- return 0;
-}
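
A short gloss on the refcounting above, since it is easy to misread (added commentary, not from the original source):

    /* atomic_add_unless(&refcount, 1, 1) only increments when the current
     * value is not already 1, so the first channel to claim PMPEG succeeds
     * (0 -> 1) and every later one gets -EBUSY until the context is deleted
     * in nv31_mpeg_context_del().  The 0xdeadcafe engctx value is just a
     * non-NULL marker; there is apparently no real per-channel MPEG context
     * to allocate on this path.
     */
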
-
-static void
-nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
- atomic_dec(&pmpeg->refcount);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ctx = NULL;
- unsigned long flags;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- ret = nouveau_gpuobj_new(dev, NULL, 264 * 4, 16, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ctx);
- if (ret)
- return ret;
-
- nv_wo32(ctx, 0x78, 0x02001ec1);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
- if ((nv_rd32(dev, 0x003204) & 0x1f) == chan->id)
- nv_wr32(dev, 0x00330c, ctx->pinst >> 4);
- nv_wo32(chan->ramfc, 0x54, ctx->pinst >> 4);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- chan->engctx[engine] = ctx;
- return 0;
-}
-
-static void
-nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- unsigned long flags;
- u32 inst = 0x80000000 | (ctx->pinst >> 4);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- if (nv_rd32(dev, 0x00b318) == inst)
- nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- nouveau_gpuobj_ref(NULL, &ctx);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &obj);
- if (ret)
- return ret;
- obj->engine = 2;
- obj->class = class;
-
- nv_wo32(obj, 0x00, class);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static int
-nv31_mpeg_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
- int i;
-
- /* VPE init */
- nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
- nv_wr32(dev, 0x00b0e0, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
- nv_wr32(dev, 0x00b0e8, 0x00000020); /* nvidia: rd 0x01, wr 0x20 */
-
- for (i = 0; i < dev_priv->engine.fb.num_tiles; i++)
- pmpeg->base.set_tile_region(dev, i);
-
- /* PMPEG init */
- nv_wr32(dev, 0x00b32c, 0x00000000);
- nv_wr32(dev, 0x00b314, 0x00000100);
- nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
- nv_wr32(dev, 0x00b300, 0x02001ec1);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-
- nv_wr32(dev, 0x00b100, 0xffffffff);
- nv_wr32(dev, 0x00b140, 0xffffffff);
-
- if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
-{
- /*XXX: context save? */
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x00b140, 0x00000000);
- return 0;
-}
-
-static int
-nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
-{
- struct drm_device *dev = chan->dev;
- u32 inst = data << 4;
- u32 dma0 = nv_ri32(dev, inst + 0);
- u32 dma1 = nv_ri32(dev, inst + 4);
- u32 dma2 = nv_ri32(dev, inst + 8);
- u32 base = (dma2 & 0xfffff000) | (dma0 >> 20);
- u32 size = dma1 + 1;
-
- /* only allow linear DMA objects */
- if (!(dma0 & 0x00002000))
- return -EINVAL;
-
- if (mthd == 0x0190) {
- /* DMA_CMD */
- nv_mask(dev, 0x00b300, 0x00030000, (dma0 & 0x00030000));
- nv_wr32(dev, 0x00b334, base);
- nv_wr32(dev, 0x00b324, size);
- } else
- if (mthd == 0x01a0) {
- /* DMA_DATA */
- nv_mask(dev, 0x00b300, 0x000c0000, (dma0 & 0x00030000) << 2);
- nv_wr32(dev, 0x00b360, base);
- nv_wr32(dev, 0x00b364, size);
- } else {
- /* DMA_IMAGE, VRAM only */
- if (dma0 & 0x000c0000)
- return -EINVAL;
-
- nv_wr32(dev, 0x00b370, base);
- nv_wr32(dev, 0x00b374, size);
- }
-
- return 0;
-}
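
The handler above rebuilds the DMA target from three instance words: bit 13 of word 0 must be set (linear object), the low 12 bits of the base come from the top of word 0 (presumably the adjust field), the rest from word 2, and word 1 is the limit. A worked example with invented instance contents:

    /* Hypothetical DMA object words:
     *   dma0 = 0x12347000   (bit 13 set -> linear; top 12 bits = 0x123)
     *   dma1 = 0x000fffff   (limit)
     *   dma2 = 0x04000000   (page-aligned base)
     *
     *   base = (0x04000000 & 0xfffff000) | (0x12347000 >> 20) = 0x04000123
     *   size = 0x000fffff + 1                                 = 0x00100000
     */
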
-
-static int
-nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ctx;
- unsigned long flags;
- int i;
-
- /* hardcode drm channel id on nv3x, so swmthd lookup works */
- if (dev_priv->card_type < NV_40)
- return 0;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- if (!dev_priv->channels.ptr[i])
- continue;
-
- ctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_MPEG];
- if (ctx && ctx->pinst == inst)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
-}
-
-static void
-nv31_vpe_set_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- nv_wr32(dev, 0x00b008 + (i * 0x10), tile->pitch);
- nv_wr32(dev, 0x00b004 + (i * 0x10), tile->limit);
- nv_wr32(dev, 0x00b000 + (i * 0x10), tile->addr);
-}
-
-static void
-nv31_mpeg_isr(struct drm_device *dev)
-{
- u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
- u32 chid = nv31_mpeg_isr_chid(dev, inst);
- u32 stat = nv_rd32(dev, 0x00b100);
- u32 type = nv_rd32(dev, 0x00b230);
- u32 mthd = nv_rd32(dev, 0x00b234);
- u32 data = nv_rd32(dev, 0x00b238);
- u32 show = stat;
-
- if (stat & 0x01000000) {
- /* happens on initial binding of the object */
- if (type == 0x00000020 && mthd == 0x0000) {
- nv_mask(dev, 0x00b308, 0x00000000, 0x00000000);
- show &= ~0x01000000;
- }
-
- if (type == 0x00000010) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, 0x3174, mthd, data))
- show &= ~0x01000000;
- }
- }
-
- nv_wr32(dev, 0x00b100, stat);
- nv_wr32(dev, 0x00b230, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PMPEG: Ch %d [0x%08x] 0x%08x 0x%08x 0x%08x 0x%08x\n",
- chid, inst, stat, type, mthd, data);
- }
-}
-
-static void
-nv31_vpe_isr(struct drm_device *dev)
-{
- if (nv_rd32(dev, 0x00b100))
- nv31_mpeg_isr(dev);
-
- if (nv_rd32(dev, 0x00b800)) {
- u32 stat = nv_rd32(dev, 0x00b800);
- NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
- nv_wr32(dev, 0xb800, stat);
- }
-}
-
-static void
-nv31_mpeg_destroy(struct drm_device *dev, int engine)
-{
- struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 0);
-
- NVOBJ_ENGINE_DEL(dev, MPEG);
- kfree(pmpeg);
-}
-
-int
-nv31_mpeg_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv31_mpeg_engine *pmpeg;
-
- pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
- if (!pmpeg)
- return -ENOMEM;
- atomic_set(&pmpeg->refcount, 0);
-
- pmpeg->base.destroy = nv31_mpeg_destroy;
- pmpeg->base.init = nv31_mpeg_init;
- pmpeg->base.fini = nv31_mpeg_fini;
- if (dev_priv->card_type < NV_40) {
- pmpeg->base.context_new = nv31_mpeg_context_new;
- pmpeg->base.context_del = nv31_mpeg_context_del;
- } else {
- pmpeg->base.context_new = nv40_mpeg_context_new;
- pmpeg->base.context_del = nv40_mpeg_context_del;
- }
- pmpeg->base.object_new = nv31_mpeg_object_new;
-
- /* The ISR vector, PMC_ENABLE bit and TILE regs are shared between
- * all VPE engines; for this driver's purposes the PMPEG engine
- * is treated as the "master" and handles the global VPE bits
- * too.
- */
- pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
- nouveau_irq_register(dev, 0, nv31_vpe_isr);
-
- NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
- NVOBJ_CLASS(dev, 0x3174, MPEG);
- NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
- NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
- NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
-
-#if 0
- NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
- NVOBJ_CLASS(dev, 0x4075, ME);
-#endif
- return 0;
-
-}
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-void
-nv40_fb_set_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- switch (dev_priv->chipset) {
- case 0x40:
- nv_wr32(dev, NV10_PFB_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV10_PFB_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV10_PFB_TILE(i), tile->addr);
- break;
-
- default:
- nv_wr32(dev, NV40_PFB_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV40_PFB_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV40_PFB_TILE(i), tile->addr);
- break;
- }
-}
-
-static void
-nv40_fb_init_gart(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
-
- if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
- nv_wr32(dev, 0x100800, 0x00000001);
- return;
- }
-
- nv_wr32(dev, 0x100800, gart->pinst | 0x00000002);
- nv_mask(dev, 0x10008c, 0x00000100, 0x00000100);
- nv_wr32(dev, 0x100820, 0x00000000);
-}
-
-static void
-nv44_fb_init_gart(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;
- u32 vinst;
-
- if (dev_priv->gart_info.type != NOUVEAU_GART_HW) {
- nv_wr32(dev, 0x100850, 0x80000000);
- nv_wr32(dev, 0x100800, 0x00000001);
- return;
- }
-
- /* Calculate the VRAM address of this PRAMIN block; the object
- * must be allocated on a 512KiB boundary and must not exceed
- * 512KiB in total size for this to work correctly.
- */
- vinst = nv_rd32(dev, 0x10020c);
- vinst -= ((gart->pinst >> 19) + 1) << 19;
-
- nv_wr32(dev, 0x100850, 0x80000000);
- nv_wr32(dev, 0x100818, dev_priv->gart_info.dummy.addr);
-
- nv_wr32(dev, 0x100804, dev_priv->gart_info.aper_size);
- nv_wr32(dev, 0x100850, 0x00008000);
- nv_mask(dev, 0x10008c, 0x00000200, 0x00000200);
- nv_wr32(dev, 0x100820, 0x00000000);
- nv_wr32(dev, 0x10082c, 0x00000001);
- nv_wr32(dev, 0x100800, vinst | 0x00000010);
-}
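
Following the comment in nv44_fb_init_gart(), the page table lives in a PRAMIN block carved from the top of VRAM (which is what the subtraction from the total VRAM size implies), so its VRAM address is recovered by counting 512KiB blocks back from the end. A worked example with invented sizes:

    /* Suppose 0x10020c reports 0x10000000 (256MiB) of VRAM and the GART
     * page table sits at PRAMIN offset gart->pinst = 0x00080000, i.e. in
     * the second 512KiB block:
     *
     *   vinst = 0x10000000 - (((0x00080000 >> 19) + 1) << 19)
     *         = 0x10000000 - (2 << 19)
     *         = 0x0ff00000
     *
     * which is then written to 0x100800 together with the low flag bits
     * (| 0x00000010 in the code above).
     */
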
-
-int
-nv40_fb_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /* 0x001218 is actually present on a few other NV4X I looked at,
- * and even contains sane values matching 0x100474. From looking
- * at various vbios images however, this isn't the case everywhere.
- * So, I chose to use the same regs I've seen NVIDIA reading around
- * the memory detection, hopefully that'll get us the right numbers
- */
- if (dev_priv->chipset == 0x40) {
- u32 pbus1218 = nv_rd32(dev, 0x001218);
- switch (pbus1218 & 0x00000300) {
- case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_SDRAM; break;
- case 0x00000100: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
- case 0x00000200: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000300: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
- }
- } else
- if (dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
- u32 pfb914 = nv_rd32(dev, 0x100914);
- switch (pfb914 & 0x00000003) {
- case 0x00000000: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
- case 0x00000001: dev_priv->vram_type = NV_MEM_TYPE_DDR2; break;
- case 0x00000002: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
- case 0x00000003: break;
- }
- } else
- if (dev_priv->chipset != 0x4e) {
- u32 pfb474 = nv_rd32(dev, 0x100474);
- if (pfb474 & 0x00000004)
- dev_priv->vram_type = NV_MEM_TYPE_GDDR3;
- if (pfb474 & 0x00000002)
- dev_priv->vram_type = NV_MEM_TYPE_DDR2;
- if (pfb474 & 0x00000001)
- dev_priv->vram_type = NV_MEM_TYPE_DDR1;
- } else {
- dev_priv->vram_type = NV_MEM_TYPE_STOLEN;
- }
-
- dev_priv->vram_size = nv_rd32(dev, 0x10020c) & 0xff000000;
- return 0;
-}
-
-int
-nv40_fb_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- uint32_t tmp;
- int i;
-
- if (dev_priv->chipset != 0x40 && dev_priv->chipset != 0x45) {
- if (nv44_graph_class(dev))
- nv44_fb_init_gart(dev);
- else
- nv40_fb_init_gart(dev);
- }
-
- switch (dev_priv->chipset) {
- case 0x40:
- case 0x45:
- tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
- nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
- pfb->num_tiles = NV10_PFB_TILE__SIZE;
- break;
- case 0x46: /* G72 */
- case 0x47: /* G70 */
- case 0x49: /* G71 */
- case 0x4b: /* G73 */
- case 0x4c: /* C51 (G7X version) */
- pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
- break;
- default:
- pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
- break;
- }
-
- /* Turn all the tiling regions off. */
- for (i = 0; i < pfb->num_tiles; i++)
- pfb->set_tile_region(dev, i);
-
- return 0;
-}
-
-void
-nv40_fb_takedown(struct drm_device *dev)
-{
-}
+++ /dev/null
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_util.h"
-#include "nouveau_ramht.h"
-
-static struct ramfc_desc {
- unsigned bits:6;
- unsigned ctxs:5;
- unsigned ctxp:8;
- unsigned regs:5;
- unsigned regp;
-} nv40_ramfc[] = {
- { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
- { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
- { 32, 0, 0x08, 0, NV10_PFIFO_CACHE1_REF_CNT },
- { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
- { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
- { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_DMA_STATE },
- { 28, 0, 0x18, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
- { 2, 28, 0x18, 28, 0x002058 },
- { 32, 0, 0x1c, 0, NV04_PFIFO_CACHE1_ENGINE },
- { 32, 0, 0x20, 0, NV04_PFIFO_CACHE1_PULL1 },
- { 32, 0, 0x24, 0, NV10_PFIFO_CACHE1_ACQUIRE_VALUE },
- { 32, 0, 0x28, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMESTAMP },
- { 32, 0, 0x2c, 0, NV10_PFIFO_CACHE1_ACQUIRE_TIMEOUT },
- { 32, 0, 0x30, 0, NV10_PFIFO_CACHE1_SEMAPHORE },
- { 32, 0, 0x34, 0, NV10_PFIFO_CACHE1_DMA_SUBROUTINE },
- { 32, 0, 0x38, 0, NV40_PFIFO_GRCTX_INSTANCE },
- { 17, 0, 0x3c, 0, NV04_PFIFO_DMA_TIMESLICE },
- { 32, 0, 0x40, 0, 0x0032e4 },
- { 32, 0, 0x44, 0, 0x0032e8 },
- { 32, 0, 0x4c, 0, 0x002088 },
- { 32, 0, 0x50, 0, 0x003300 },
- { 32, 0, 0x54, 0, 0x00330c },
- {}
-};
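
Each nv40_ramfc[] entry describes one field: `bits` wide, at bit `ctxs` of RAMFC byte offset `ctxp`, mirrored at bit `regs` of PFIFO register `regp`, with the empty entry terminating the table. Context save/restore then reduces to walking the table; below is a minimal hedged sketch of the save direction only (the driver's real load/save helpers live in the nv04 FIFO code and do more than this):

    /* Illustrative sketch, not the driver's actual save path: copy the live
     * PFIFO register state into a channel's RAMFC image by walking a
     * ramfc_desc table until the zero-length terminator entry. */
    static void
    ramfc_save_sketch(struct drm_device *dev, struct nouveau_gpuobj *ramfc,
                      struct ramfc_desc *c)
    {
            do {
                    u32 mask = ~0U >> (32 - c->bits);
                    u32 val  = (nv_rd32(dev, c->regp) >> c->regs) & mask;
                    u32 cur  = nv_ro32(ramfc, c->ctxp);

                    nv_wo32(ramfc, c->ctxp,
                            (cur & ~(mask << c->ctxs)) | (val << c->ctxs));
            } while ((++c)->bits);
    }
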
-
-struct nv40_fifo_priv {
- struct nouveau_fifo_priv base;
- struct ramfc_desc *ramfc_desc;
-};
-
-struct nv40_fifo_chan {
- struct nouveau_fifo_chan base;
- struct nouveau_gpuobj *ramfc;
-};
-
-static int
-nv40_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv40_fifo_priv *priv = nv_engine(dev, engine);
- struct nv40_fifo_chan *fctx;
- unsigned long flags;
- int ret;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- /* map channel control registers */
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV03_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* initialise default fifo context */
- ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
- chan->id * 128, ~0, 128,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
- if (ret)
- goto error;
-
- nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
- nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
- nv_wo32(fctx->ramfc, 0x0c, chan->pushbuf->pinst >> 4);
- nv_wo32(fctx->ramfc, 0x18, 0x30000000 |
- NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
- NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
-#ifdef __BIG_ENDIAN
- NV_PFIFO_CACHE1_BIG_ENDIAN |
-#endif
- NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
- nv_wo32(fctx->ramfc, 0x3c, 0x0001ffff);
-
- /* enable dma mode on the channel */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /*XXX: remove this later, need fifo engine context commit hook */
- nouveau_gpuobj_ref(fctx->ramfc, &chan->ramfc);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-static int
-nv40_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv40_fifo_priv *priv = nv_engine(dev, engine);
- int i;
-
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
- nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
-
- nv_wr32(dev, 0x002040, 0x000000ff);
- nv_wr32(dev, 0x002044, 0x2101ffff);
- nv_wr32(dev, 0x002058, 0x00000001);
-
- nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
- ((dev_priv->ramht->bits - 9) << 16) |
- (dev_priv->ramht->gpuobj->pinst >> 8));
- nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
-
- switch (dev_priv->chipset) {
- case 0x47:
- case 0x49:
- case 0x4b:
- nv_wr32(dev, 0x002230, 0x00000001);
- case 0x40:
- case 0x41:
- case 0x42:
- case 0x43:
- case 0x45:
- case 0x48:
- nv_wr32(dev, 0x002220, 0x00030002);
- break;
- default:
- nv_wr32(dev, 0x002230, 0x00000000);
- nv_wr32(dev, 0x002220, ((dev_priv->vram_size - 512 * 1024 +
- dev_priv->ramfc->pinst) >> 16) |
- 0x00030000);
- break;
- }
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
-
- nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
- nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
-
- nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
- nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
- nv_wr32(dev, NV03_PFIFO_CACHES, 1);
-
- for (i = 0; i < priv->base.channels; i++) {
- if (dev_priv->channels.ptr[i])
- nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
- }
-
- return 0;
-}
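
The NV03_PFIFO_RAMHT write above packs three fields into one word: search depth, hash-table size and PRAMIN address. A worked example with invented numbers:

    /* e.g. a RAMHT with ramht->bits = 12 at PRAMIN offset 0x10000:
     *
     *     (0x03 << 24)        search 128
     *   | ((12 - 9) << 16)    size code 3
     *   | (0x10000 >> 8)      address 0x100
     *   ------------------------------------
     *   = 0x03030100
     */
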
-
-int
-nv40_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv40_fifo_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nv04_fifo_destroy;
- priv->base.base.init = nv40_fifo_init;
- priv->base.base.fini = nv04_fifo_fini;
- priv->base.base.context_new = nv40_fifo_context_new;
- priv->base.base.context_del = nv04_fifo_context_del;
- priv->base.channels = 31;
- priv->ramfc_desc = nv40_ramfc;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
- return 0;
-}
+++ /dev/null
-/*
- * Copyright (C) 2007 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-
-struct nv40_graph_engine {
- struct nouveau_exec_engine base;
- u32 grctx_size;
-};
-
-static int
-nv40_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv40_graph_engine *pgraph = nv_engine(chan->dev, engine);
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *grctx = NULL;
- unsigned long flags;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 16,
- NVOBJ_FLAG_ZERO_ALLOC, &grctx);
- if (ret)
- return ret;
-
- /* Initialise default context values */
- nv40_grctx_fill(dev, grctx);
- nv_wo32(grctx, 0, grctx->vinst);
-
- /* init grctx pointer in ramfc, and on PFIFO if channel is
- * already active there
- */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wo32(chan->ramfc, 0x38, grctx->vinst >> 4);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000000);
- if ((nv_rd32(dev, 0x003204) & 0x0000001f) == chan->id)
- nv_wr32(dev, 0x0032e0, grctx->vinst >> 4);
- nv_mask(dev, 0x002500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- chan->engctx[engine] = grctx;
- return 0;
-}
-
-static void
-nv40_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *grctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 inst = 0x01000000 | (grctx->pinst >> 4);
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x400720, 0x00000000, 0x00000001);
- if (nv_rd32(dev, 0x40032c) == inst)
- nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
- if (nv_rd32(dev, 0x400330) == inst)
- nv_mask(dev, 0x400330, 0x01000000, 0x00000000);
- nv_mask(dev, 0x400720, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* Free the context resources */
- nouveau_gpuobj_ref(NULL, &grctx);
- chan->engctx[engine] = NULL;
-}
-
-int
-nv40_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 20, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
- if (ret)
- return ret;
- obj->engine = 1;
- obj->class = class;
-
- nv_wo32(obj, 0x00, class);
- nv_wo32(obj, 0x04, 0x00000000);
-#ifndef __BIG_ENDIAN
- nv_wo32(obj, 0x08, 0x00000000);
-#else
- nv_wo32(obj, 0x08, 0x01000000);
-#endif
- nv_wo32(obj, 0x0c, 0x00000000);
- nv_wo32(obj, 0x10, 0x00000000);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static void
-nv40_graph_set_tile_region(struct drm_device *dev, int i)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
-
- switch (dev_priv->chipset) {
- case 0x40:
- case 0x41: /* guess */
- case 0x42:
- case 0x43:
- case 0x45: /* guess */
- case 0x4e:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
- break;
- case 0x44:
- case 0x4a:
- nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
- break;
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- case 0x4c:
- case 0x67:
- default:
- nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
- nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
- nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
- nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
- nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
- nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
- break;
- }
-}
-
-/*
- * G70 0x47
- * G71 0x49
- * NV45 0x48
- * G72[M] 0x46
- * G73 0x4b
- * C51_G7X 0x4c
- * C51 0x4e
- */
-int
-nv40_graph_init(struct drm_device *dev, int engine)
-{
- struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- uint32_t vramsz;
- int i, j;
-
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
- ~NV_PMC_ENABLE_PGRAPH);
- nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
- NV_PMC_ENABLE_PGRAPH);
-
- /* generate and upload context program */
- nv40_grctx_init(dev, &pgraph->grctx_size);
-
- /* No context present currently */
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
-
- nv_wr32(dev, NV03_PGRAPH_INTR , 0xFFFFFFFF);
- nv_wr32(dev, NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
-
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_0, 0x00000000);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_1, 0x401287c0);
- nv_wr32(dev, NV04_PGRAPH_DEBUG_3, 0xe0de8055);
- nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00008000);
- nv_wr32(dev, NV04_PGRAPH_LIMIT_VIOL_PIX, 0x00be3c5f);
-
- nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10010100);
- nv_wr32(dev, NV10_PGRAPH_STATE , 0xFFFFFFFF);
-
- j = nv_rd32(dev, 0x1540) & 0xff;
- if (j) {
- for (i = 0; !(j & 1); j >>= 1, i++)
- ;
- nv_wr32(dev, 0x405000, i);
- }
-
- if (dev_priv->chipset == 0x40) {
- nv_wr32(dev, 0x4009b0, 0x83280fff);
- nv_wr32(dev, 0x4009b4, 0x000000a0);
- } else {
- nv_wr32(dev, 0x400820, 0x83280eff);
- nv_wr32(dev, 0x400824, 0x000000a0);
- }
-
- switch (dev_priv->chipset) {
- case 0x40:
- case 0x45:
- nv_wr32(dev, 0x4009b8, 0x0078e366);
- nv_wr32(dev, 0x4009bc, 0x0000014c);
- break;
- case 0x41:
- case 0x42: /* pciid also 0x00Cx */
- /* case 0x0120: XXX (pciid) */
- nv_wr32(dev, 0x400828, 0x007596ff);
- nv_wr32(dev, 0x40082c, 0x00000108);
- break;
- case 0x43:
- nv_wr32(dev, 0x400828, 0x0072cb77);
- nv_wr32(dev, 0x40082c, 0x00000108);
- break;
- case 0x44:
- case 0x46: /* G72 */
- case 0x4a:
- case 0x4c: /* G7x-based C51 */
- case 0x4e:
- nv_wr32(dev, 0x400860, 0);
- nv_wr32(dev, 0x400864, 0);
- break;
- case 0x47: /* G70 */
- case 0x49: /* G71 */
- case 0x4b: /* G73 */
- nv_wr32(dev, 0x400828, 0x07830610);
- nv_wr32(dev, 0x40082c, 0x0000016A);
- break;
- default:
- break;
- }
-
- nv_wr32(dev, 0x400b38, 0x2ffff800);
- nv_wr32(dev, 0x400b3c, 0x00006000);
-
- /* Tiling related stuff. */
- switch (dev_priv->chipset) {
- case 0x44:
- case 0x4a:
- nv_wr32(dev, 0x400bc4, 0x1003d888);
- nv_wr32(dev, 0x400bbc, 0xb7a7b500);
- break;
- case 0x46:
- nv_wr32(dev, 0x400bc4, 0x0000e024);
- nv_wr32(dev, 0x400bbc, 0xb7a7b520);
- break;
- case 0x4c:
- case 0x4e:
- case 0x67:
- nv_wr32(dev, 0x400bc4, 0x1003d888);
- nv_wr32(dev, 0x400bbc, 0xb7a7b540);
- break;
- default:
- break;
- }
-
- /* Turn all the tiling regions off. */
- for (i = 0; i < pfb->num_tiles; i++)
- nv40_graph_set_tile_region(dev, i);
-
- /* begin RAM config */
- vramsz = pci_resource_len(dev->pdev, 0) - 1;
- switch (dev_priv->chipset) {
- case 0x40:
- nv_wr32(dev, 0x4009A4, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4009A8, nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, 0x4069A4, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4069A8, nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, 0x400820, 0);
- nv_wr32(dev, 0x400824, 0);
- nv_wr32(dev, 0x400864, vramsz);
- nv_wr32(dev, 0x400868, vramsz);
- break;
- default:
- switch (dev_priv->chipset) {
- case 0x41:
- case 0x42:
- case 0x43:
- case 0x45:
- case 0x4e:
- case 0x44:
- case 0x4a:
- nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
- break;
- default:
- nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
- break;
- }
- nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
- nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
- nv_wr32(dev, 0x400840, 0);
- nv_wr32(dev, 0x400844, 0);
- nv_wr32(dev, 0x4008A0, vramsz);
- nv_wr32(dev, 0x4008A4, vramsz);
- break;
- }
-
- return 0;
-}
-
-static int
-nv40_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- u32 inst = nv_rd32(dev, 0x40032c);
- if (inst & 0x01000000) {
- nv_wr32(dev, 0x400720, 0x00000000);
- nv_wr32(dev, 0x400784, inst);
- nv_mask(dev, 0x400310, 0x00000020, 0x00000020);
- nv_mask(dev, 0x400304, 0x00000001, 0x00000001);
- if (!nv_wait(dev, 0x400300, 0x00000001, 0x00000000)) {
- u32 insn = nv_rd32(dev, 0x400308);
- NV_ERROR(dev, "PGRAPH: ctxprog timeout 0x%08x\n", insn);
- }
- nv_mask(dev, 0x40032c, 0x01000000, 0x00000000);
- }
- return 0;
-}
-
-static int
-nv40_graph_isr_chid(struct drm_device *dev, u32 inst)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *grctx;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- if (!dev_priv->channels.ptr[i])
- continue;
- grctx = dev_priv->channels.ptr[i]->engctx[NVOBJ_ENGINE_GR];
-
- if (grctx && grctx->pinst == inst)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
-}
-
-static void
-nv40_graph_isr(struct drm_device *dev)
-{
- u32 stat;
-
- while ((stat = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- u32 nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
- u32 nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
- u32 inst = (nv_rd32(dev, 0x40032c) & 0x000fffff) << 4;
- u32 chid = nv40_graph_isr_chid(dev, inst);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400160 + subc * 4) & 0xffff;
- u32 show = stat;
-
- if (stat & NV_PGRAPH_INTR_ERROR) {
- if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data))
- show &= ~NV_PGRAPH_INTR_ERROR;
- } else
- if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
- nv_mask(dev, 0x402000, 0, 0);
- }
- }
-
- nv_wr32(dev, NV03_PGRAPH_INTR, stat);
- nv_wr32(dev, NV04_PGRAPH_FIFO, 0x00000001);
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv10_graph_intr, show);
- printk(" nsource:");
- nouveau_bitfield_print(nv04_graph_nsource, nsource);
- printk(" nstatus:");
- nouveau_bitfield_print(nv10_graph_nstatus, nstatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d (0x%08x) subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- }
- }
-}
-
-static void
-nv40_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nv40_graph_engine *pgraph = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 12);
-
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(pgraph);
-}
-
-int
-nv40_graph_create(struct drm_device *dev)
-{
- struct nv40_graph_engine *pgraph;
-
- pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- pgraph->base.destroy = nv40_graph_destroy;
- pgraph->base.init = nv40_graph_init;
- pgraph->base.fini = nv40_graph_fini;
- pgraph->base.context_new = nv40_graph_context_new;
- pgraph->base.context_del = nv40_graph_context_del;
- pgraph->base.object_new = nv40_graph_object_new;
- pgraph->base.set_tile_region = nv40_graph_set_tile_region;
-
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- nouveau_irq_register(dev, 12, nv40_graph_isr);
-
- NVOBJ_CLASS(dev, 0x0030, GR); /* null */
- NVOBJ_CLASS(dev, 0x0039, GR); /* m2mf */
- NVOBJ_CLASS(dev, 0x004a, GR); /* gdirect */
- NVOBJ_CLASS(dev, 0x009f, GR); /* imageblit (nv12) */
- NVOBJ_CLASS(dev, 0x008a, GR); /* ifc */
- NVOBJ_CLASS(dev, 0x0089, GR); /* sifm */
- NVOBJ_CLASS(dev, 0x3089, GR); /* sifm (nv40) */
- NVOBJ_CLASS(dev, 0x0062, GR); /* surf2d */
- NVOBJ_CLASS(dev, 0x3062, GR); /* surf2d (nv40) */
- NVOBJ_CLASS(dev, 0x0043, GR); /* rop */
- NVOBJ_CLASS(dev, 0x0012, GR); /* beta1 */
- NVOBJ_CLASS(dev, 0x0072, GR); /* beta4 */
- NVOBJ_CLASS(dev, 0x0019, GR); /* cliprect */
- NVOBJ_CLASS(dev, 0x0044, GR); /* pattern */
- NVOBJ_CLASS(dev, 0x309e, GR); /* swzsurf */
-
- /* curie */
- if (nv44_graph_class(dev))
- NVOBJ_CLASS(dev, 0x4497, GR);
- else
- NVOBJ_CLASS(dev, 0x4097, GR);
-
- return 0;
-}
+++ /dev/null
-/*
- * Copyright 2009 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-/* NVIDIA context programs handle a number of other conditions which are
- * not implemented in our versions. It's not clear why NVIDIA context
- * programs have this code, nor whether it's strictly necessary for
- * correct operation. We'll implement additional handling if/when we
- * discover it's necessary.
- *
- * - On context save, NVIDIA set 0x400314 bit 0 to 1 if the "3D state"
- * flag is set; this gets saved into the context.
- * - On context save, the context program for all cards loads nsource
- * into a flag register and checks for ILLEGAL_MTHD. If it's set,
- * opcode 0x60000d is called before resuming normal operation.
- * - Some context programs check more conditions than the above. NV44
- * checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
- * and calls 0x60000d before resuming normal operation.
- * - At the very beginning of NVIDIA's context programs, flag 9 is checked
- * and if true 0x800001 is called with count=0, pos=0, the flag is cleared
- * and then the ctxprog is aborted. It looks like a complicated NOP;
- * its purpose is unknown.
- * - In the section of code that loads the per-vs state, NVIDIA check
- * flag 10. If it's set, they only transfer the small 0x300 byte block
- * of state + the state for a single vs as opposed to the state for
- * all vs units. It doesn't seem likely that it'll occur in normal
- * operation, especially seeing as it appears NVIDIA may have screwed
- * up the ctxprogs for some cards and have an invalid instruction
- * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
- * - There's a number of places where context offset 0 (where we place
- * the PRAMIN offset of the context) is loaded into either 0x408000,
- * 0x408004 or 0x408008. Not sure what's up there either.
- * - The ctxprogs for some cards save 0x400a00 again during the cleanup
- * path for auto-loadctx.
- */
-
-#define CP_FLAG_CLEAR 0
-#define CP_FLAG_SET 1
-#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
-#define CP_FLAG_SWAP_DIRECTION_LOAD 0
-#define CP_FLAG_SWAP_DIRECTION_SAVE 1
-#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
-#define CP_FLAG_USER_SAVE_NOT_PENDING 0
-#define CP_FLAG_USER_SAVE_PENDING 1
-#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
-#define CP_FLAG_USER_LOAD_NOT_PENDING 0
-#define CP_FLAG_USER_LOAD_PENDING 1
-#define CP_FLAG_STATUS ((3 * 32) + 0)
-#define CP_FLAG_STATUS_IDLE 0
-#define CP_FLAG_STATUS_BUSY 1
-#define CP_FLAG_AUTO_SAVE ((3 * 32) + 4)
-#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
-#define CP_FLAG_AUTO_SAVE_PENDING 1
-#define CP_FLAG_AUTO_LOAD ((3 * 32) + 5)
-#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
-#define CP_FLAG_AUTO_LOAD_PENDING 1
-#define CP_FLAG_UNK54 ((3 * 32) + 6)
-#define CP_FLAG_UNK54_CLEAR 0
-#define CP_FLAG_UNK54_SET 1
-#define CP_FLAG_ALWAYS ((3 * 32) + 8)
-#define CP_FLAG_ALWAYS_FALSE 0
-#define CP_FLAG_ALWAYS_TRUE 1
-#define CP_FLAG_UNK57 ((3 * 32) + 9)
-#define CP_FLAG_UNK57_CLEAR 0
-#define CP_FLAG_UNK57_SET 1
-
-#define CP_CTX 0x00100000
-#define CP_CTX_COUNT 0x000fc000
-#define CP_CTX_COUNT_SHIFT 14
-#define CP_CTX_REG 0x00003fff
-#define CP_LOAD_SR 0x00200000
-#define CP_LOAD_SR_VALUE 0x000fffff
-#define CP_BRA 0x00400000
-#define CP_BRA_IP 0x0000ff00
-#define CP_BRA_IP_SHIFT 8
-#define CP_BRA_IF_CLEAR 0x00000080
-#define CP_BRA_FLAG 0x0000007f
-#define CP_WAIT 0x00500000
-#define CP_WAIT_SET 0x00000080
-#define CP_WAIT_FLAG 0x0000007f
-#define CP_SET 0x00700000
-#define CP_SET_1 0x00000080
-#define CP_SET_FLAG 0x0000007f
-#define CP_NEXT_TO_SWAP 0x00600007
-#define CP_NEXT_TO_CURRENT 0x00600009
-#define CP_SET_CONTEXT_POINTER 0x0060000a
-#define CP_END 0x0060000e
-#define CP_LOAD_MAGIC_UNK01 0x00800001 /* unknown */
-#define CP_LOAD_MAGIC_NV44TCL 0x00800029 /* per-vs state (0x4497) */
-#define CP_LOAD_MAGIC_NV40TCL 0x00800041 /* per-vs state (0x4097) */
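Editorial aside, not part of the patch: the ctxprog produced by the generator below is just a stream of 32-bit words packed from the CP_* masks and shifts defined above. A minimal standalone sketch of how a conditional-branch word composes from those fields; the helper name is hypothetical and does not exist in the driver, and it assumes the flag index is encoded directly in the low 7 bits:

#include <stdint.h>

/* Pack a ctxprog branch: jump to instruction 'ip' when 'flag' is set,
 * or when it is clear if 'if_clear' is non-zero.  Uses the CP_BRA
 * bitfields defined above. */
static uint32_t cp_bra_opcode(uint8_t ip, uint8_t flag, int if_clear)
{
    uint32_t op = CP_BRA;                   /* 0x00400000 opcode */
    op |= (uint32_t)ip << CP_BRA_IP_SHIFT;  /* branch target (CP_BRA_IP field) */
    op |= flag & CP_BRA_FLAG;               /* flag index, low 7 bits */
    if (if_clear)
        op |= CP_BRA_IF_CLEAR;              /* take the branch when the flag is clear */
    return op;
}

For example, ip 0x20 with flag CP_FLAG_UNK57 (3 * 32 + 9 = 105 = 0x69) and if_clear set packs to 0x004020e9.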
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
-
-/* TODO:
- * - get vs count from 0x1540
- */
-
-static int
-nv40_graph_vs_count(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- switch (dev_priv->chipset) {
- case 0x47:
- case 0x49:
- case 0x4b:
- return 8;
- case 0x40:
- return 6;
- case 0x41:
- case 0x42:
- return 5;
- case 0x43:
- case 0x44:
- case 0x46:
- case 0x4a:
- return 3;
- case 0x4c:
- case 0x4e:
- case 0x67:
- default:
- return 1;
- }
-}
-
-
-enum cp_label {
- cp_check_load = 1,
- cp_setup_auto_load,
- cp_setup_load,
- cp_setup_save,
- cp_swap_state,
- cp_swap_state3d_3_is_save,
- cp_prepare_exit,
- cp_exit,
-};
-
-static void
-nv40_graph_construct_general(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i;
-
- cp_ctx(ctx, 0x4000a4, 1);
- gr_def(ctx, 0x4000a4, 0x00000008);
- cp_ctx(ctx, 0x400144, 58);
- gr_def(ctx, 0x400144, 0x00000001);
- cp_ctx(ctx, 0x400314, 1);
- gr_def(ctx, 0x400314, 0x00000000);
- cp_ctx(ctx, 0x400400, 10);
- cp_ctx(ctx, 0x400480, 10);
- cp_ctx(ctx, 0x400500, 19);
- gr_def(ctx, 0x400514, 0x00040000);
- gr_def(ctx, 0x400524, 0x55555555);
- gr_def(ctx, 0x400528, 0x55555555);
- gr_def(ctx, 0x40052c, 0x55555555);
- gr_def(ctx, 0x400530, 0x55555555);
- cp_ctx(ctx, 0x400560, 6);
- gr_def(ctx, 0x400568, 0x0000ffff);
- gr_def(ctx, 0x40056c, 0x0000ffff);
- cp_ctx(ctx, 0x40057c, 5);
- cp_ctx(ctx, 0x400710, 3);
- gr_def(ctx, 0x400710, 0x20010001);
- gr_def(ctx, 0x400714, 0x0f73ef00);
- cp_ctx(ctx, 0x400724, 1);
- gr_def(ctx, 0x400724, 0x02008821);
- cp_ctx(ctx, 0x400770, 3);
- if (dev_priv->chipset == 0x40) {
- cp_ctx(ctx, 0x400814, 4);
- cp_ctx(ctx, 0x400828, 5);
- cp_ctx(ctx, 0x400840, 5);
- gr_def(ctx, 0x400850, 0x00000040);
- cp_ctx(ctx, 0x400858, 4);
- gr_def(ctx, 0x400858, 0x00000040);
- gr_def(ctx, 0x40085c, 0x00000040);
- gr_def(ctx, 0x400864, 0x80000000);
- cp_ctx(ctx, 0x40086c, 9);
- gr_def(ctx, 0x40086c, 0x80000000);
- gr_def(ctx, 0x400870, 0x80000000);
- gr_def(ctx, 0x400874, 0x80000000);
- gr_def(ctx, 0x400878, 0x80000000);
- gr_def(ctx, 0x400888, 0x00000040);
- gr_def(ctx, 0x40088c, 0x80000000);
- cp_ctx(ctx, 0x4009c0, 8);
- gr_def(ctx, 0x4009cc, 0x80000000);
- gr_def(ctx, 0x4009dc, 0x80000000);
- } else {
- cp_ctx(ctx, 0x400840, 20);
- if (nv44_graph_class(ctx->dev)) {
- for (i = 0; i < 8; i++)
- gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
- }
- gr_def(ctx, 0x400880, 0x00000040);
- gr_def(ctx, 0x400884, 0x00000040);
- gr_def(ctx, 0x400888, 0x00000040);
- cp_ctx(ctx, 0x400894, 11);
- gr_def(ctx, 0x400894, 0x00000040);
- if (!nv44_graph_class(ctx->dev)) {
- for (i = 0; i < 8; i++)
- gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
- }
- cp_ctx(ctx, 0x4008e0, 2);
- cp_ctx(ctx, 0x4008f8, 2);
- if (dev_priv->chipset == 0x4c ||
- (dev_priv->chipset & 0xf0) == 0x60)
- cp_ctx(ctx, 0x4009f8, 1);
- }
- cp_ctx(ctx, 0x400a00, 73);
- gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
- cp_ctx(ctx, 0x401000, 4);
- cp_ctx(ctx, 0x405004, 1);
- switch (dev_priv->chipset) {
- case 0x47:
- case 0x49:
- case 0x4b:
- cp_ctx(ctx, 0x403448, 1);
- gr_def(ctx, 0x403448, 0x00001010);
- break;
- default:
- cp_ctx(ctx, 0x403440, 1);
- switch (dev_priv->chipset) {
- case 0x40:
- gr_def(ctx, 0x403440, 0x00000010);
- break;
- case 0x44:
- case 0x46:
- case 0x4a:
- gr_def(ctx, 0x403440, 0x00003010);
- break;
- case 0x41:
- case 0x42:
- case 0x43:
- case 0x4c:
- case 0x4e:
- case 0x67:
- default:
- gr_def(ctx, 0x403440, 0x00001010);
- break;
- }
- break;
- }
-}
-
-static void
-nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i;
-
- if (dev_priv->chipset == 0x40) {
- cp_ctx(ctx, 0x401880, 51);
- gr_def(ctx, 0x401940, 0x00000100);
- } else
- if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
- dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
- cp_ctx(ctx, 0x401880, 32);
- for (i = 0; i < 16; i++)
- gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
- if (dev_priv->chipset == 0x46)
- cp_ctx(ctx, 0x401900, 16);
- cp_ctx(ctx, 0x401940, 3);
- }
- cp_ctx(ctx, 0x40194c, 18);
- gr_def(ctx, 0x401954, 0x00000111);
- gr_def(ctx, 0x401958, 0x00080060);
- gr_def(ctx, 0x401974, 0x00000080);
- gr_def(ctx, 0x401978, 0xffff0000);
- gr_def(ctx, 0x40197c, 0x00000001);
- gr_def(ctx, 0x401990, 0x46400000);
- if (dev_priv->chipset == 0x40) {
- cp_ctx(ctx, 0x4019a0, 2);
- cp_ctx(ctx, 0x4019ac, 5);
- } else {
- cp_ctx(ctx, 0x4019a0, 1);
- cp_ctx(ctx, 0x4019b4, 3);
- }
- gr_def(ctx, 0x4019bc, 0xffff0000);
- switch (dev_priv->chipset) {
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- cp_ctx(ctx, 0x4019c0, 18);
- for (i = 0; i < 16; i++)
- gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
- break;
- }
- cp_ctx(ctx, 0x401a08, 8);
- gr_def(ctx, 0x401a10, 0x0fff0000);
- gr_def(ctx, 0x401a14, 0x0fff0000);
- gr_def(ctx, 0x401a1c, 0x00011100);
- cp_ctx(ctx, 0x401a2c, 4);
- cp_ctx(ctx, 0x401a44, 26);
- for (i = 0; i < 16; i++)
- gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
- gr_def(ctx, 0x401a8c, 0x4b7fffff);
- if (dev_priv->chipset == 0x40) {
- cp_ctx(ctx, 0x401ab8, 3);
- } else {
- cp_ctx(ctx, 0x401ab8, 1);
- cp_ctx(ctx, 0x401ac0, 1);
- }
- cp_ctx(ctx, 0x401ad0, 8);
- gr_def(ctx, 0x401ad0, 0x30201000);
- gr_def(ctx, 0x401ad4, 0x70605040);
- gr_def(ctx, 0x401ad8, 0xb8a89888);
- gr_def(ctx, 0x401adc, 0xf8e8d8c8);
- cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
- gr_def(ctx, 0x401b10, 0x40100000);
- cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
- gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
- 0x00000004 : 0x00000000);
- cp_ctx(ctx, 0x401b30, 25);
- gr_def(ctx, 0x401b34, 0x0000ffff);
- gr_def(ctx, 0x401b68, 0x435185d6);
- gr_def(ctx, 0x401b6c, 0x2155b699);
- gr_def(ctx, 0x401b70, 0xfedcba98);
- gr_def(ctx, 0x401b74, 0x00000098);
- gr_def(ctx, 0x401b84, 0xffffffff);
- gr_def(ctx, 0x401b88, 0x00ff7000);
- gr_def(ctx, 0x401b8c, 0x0000ffff);
- if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
- dev_priv->chipset != 0x4e)
- cp_ctx(ctx, 0x401b94, 1);
- cp_ctx(ctx, 0x401b98, 8);
- gr_def(ctx, 0x401b9c, 0x00ff0000);
- cp_ctx(ctx, 0x401bc0, 9);
- gr_def(ctx, 0x401be0, 0x00ffff00);
- cp_ctx(ctx, 0x401c00, 192);
- for (i = 0; i < 16; i++) { /* fragment texture units */
- gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
- gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
- gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
- gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
- gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
- gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
- }
- for (i = 0; i < 4; i++) { /* vertex texture units */
- gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
- gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
- gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
- gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
- }
- cp_ctx(ctx, 0x400f5c, 3);
- gr_def(ctx, 0x400f5c, 0x00000002);
- cp_ctx(ctx, 0x400f84, 1);
-}
-
-static void
-nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i;
-
- cp_ctx(ctx, 0x402000, 1);
- cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
- switch (dev_priv->chipset) {
- case 0x40:
- gr_def(ctx, 0x402404, 0x00000001);
- break;
- case 0x4c:
- case 0x4e:
- case 0x67:
- gr_def(ctx, 0x402404, 0x00000020);
- break;
- case 0x46:
- case 0x49:
- case 0x4b:
- gr_def(ctx, 0x402404, 0x00000421);
- break;
- default:
- gr_def(ctx, 0x402404, 0x00000021);
- }
- if (dev_priv->chipset != 0x40)
- gr_def(ctx, 0x402408, 0x030c30c3);
- switch (dev_priv->chipset) {
- case 0x44:
- case 0x46:
- case 0x4a:
- case 0x4c:
- case 0x4e:
- case 0x67:
- cp_ctx(ctx, 0x402440, 1);
- gr_def(ctx, 0x402440, 0x00011001);
- break;
- default:
- break;
- }
- cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
- gr_def(ctx, 0x402488, 0x3e020200);
- gr_def(ctx, 0x40248c, 0x00ffffff);
- switch (dev_priv->chipset) {
- case 0x40:
- gr_def(ctx, 0x402490, 0x60103f00);
- break;
- case 0x47:
- gr_def(ctx, 0x402490, 0x40103f00);
- break;
- case 0x41:
- case 0x42:
- case 0x49:
- case 0x4b:
- gr_def(ctx, 0x402490, 0x20103f00);
- break;
- default:
- gr_def(ctx, 0x402490, 0x0c103f00);
- break;
- }
- gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
- 0x00020000 : 0x00040000);
- cp_ctx(ctx, 0x402500, 31);
- gr_def(ctx, 0x402530, 0x00008100);
- if (dev_priv->chipset == 0x40)
- cp_ctx(ctx, 0x40257c, 6);
- cp_ctx(ctx, 0x402594, 16);
- cp_ctx(ctx, 0x402800, 17);
- gr_def(ctx, 0x402800, 0x00000001);
- switch (dev_priv->chipset) {
- case 0x47:
- case 0x49:
- case 0x4b:
- cp_ctx(ctx, 0x402864, 1);
- gr_def(ctx, 0x402864, 0x00001001);
- cp_ctx(ctx, 0x402870, 3);
- gr_def(ctx, 0x402878, 0x00000003);
- if (dev_priv->chipset != 0x47) { /* belong at end!! */
- cp_ctx(ctx, 0x402900, 1);
- cp_ctx(ctx, 0x402940, 1);
- cp_ctx(ctx, 0x402980, 1);
- cp_ctx(ctx, 0x4029c0, 1);
- cp_ctx(ctx, 0x402a00, 1);
- cp_ctx(ctx, 0x402a40, 1);
- cp_ctx(ctx, 0x402a80, 1);
- cp_ctx(ctx, 0x402ac0, 1);
- }
- break;
- case 0x40:
- cp_ctx(ctx, 0x402844, 1);
- gr_def(ctx, 0x402844, 0x00000001);
- cp_ctx(ctx, 0x402850, 1);
- break;
- default:
- cp_ctx(ctx, 0x402844, 1);
- gr_def(ctx, 0x402844, 0x00001001);
- cp_ctx(ctx, 0x402850, 2);
- gr_def(ctx, 0x402854, 0x00000003);
- break;
- }
-
- cp_ctx(ctx, 0x402c00, 4);
- gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
- 0x80800001 : 0x00888001);
- switch (dev_priv->chipset) {
- case 0x47:
- case 0x49:
- case 0x4b:
- cp_ctx(ctx, 0x402c20, 40);
- for (i = 0; i < 32; i++)
- gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
- cp_ctx(ctx, 0x4030b8, 13);
- gr_def(ctx, 0x4030dc, 0x00000005);
- gr_def(ctx, 0x4030e8, 0x0000ffff);
- break;
- default:
- cp_ctx(ctx, 0x402c10, 4);
- if (dev_priv->chipset == 0x40)
- cp_ctx(ctx, 0x402c20, 36);
- else
- if (dev_priv->chipset <= 0x42)
- cp_ctx(ctx, 0x402c20, 24);
- else
- if (dev_priv->chipset <= 0x4a)
- cp_ctx(ctx, 0x402c20, 16);
- else
- cp_ctx(ctx, 0x402c20, 8);
- cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
- gr_def(ctx, 0x402cd4, 0x00000005);
- if (dev_priv->chipset != 0x40)
- gr_def(ctx, 0x402ce0, 0x0000ffff);
- break;
- }
-
- cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
- cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
- cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
- for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
- gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
-
- if (dev_priv->chipset != 0x40) {
- cp_ctx(ctx, 0x403600, 1);
- gr_def(ctx, 0x403600, 0x00000001);
- }
- cp_ctx(ctx, 0x403800, 1);
-
- cp_ctx(ctx, 0x403c18, 1);
- gr_def(ctx, 0x403c18, 0x00000001);
- switch (dev_priv->chipset) {
- case 0x46:
- case 0x47:
- case 0x49:
- case 0x4b:
- cp_ctx(ctx, 0x405018, 1);
- gr_def(ctx, 0x405018, 0x08e00001);
- cp_ctx(ctx, 0x405c24, 1);
- gr_def(ctx, 0x405c24, 0x000e3000);
- break;
- }
- if (dev_priv->chipset != 0x4e)
- cp_ctx(ctx, 0x405800, 11);
- cp_ctx(ctx, 0x407000, 1);
-}
-
-static void
-nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
-{
- int len = nv44_graph_class(ctx->dev) ? 0x0084 : 0x0684;
-
- cp_out (ctx, 0x300000);
- cp_lsr (ctx, len - 4);
- cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
- cp_lsr (ctx, len);
- cp_name(ctx, cp_swap_state3d_3_is_save);
- cp_out (ctx, 0x800001);
-
- ctx->ctxvals_pos += len;
-}
-
-static void
-nv40_graph_construct_shader(struct nouveau_grctx *ctx)
-{
- struct drm_device *dev = ctx->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = ctx->data;
- int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
- int offset, i;
-
- vs_nr = nv40_graph_vs_count(ctx->dev);
- vs_nr_b0 = 363;
- vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
- if (dev_priv->chipset == 0x40) {
- b0_offset = 0x2200/4; /* 33a0 */
- b1_offset = 0x55a0/4; /* 1500 */
- vs_len = 0x6aa0/4;
- } else
- if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
- b0_offset = 0x2200/4; /* 2200 */
- b1_offset = 0x4400/4; /* 0b00 */
- vs_len = 0x4f00/4;
- } else {
- b0_offset = 0x1d40/4; /* 2200 */
- b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
- vs_len = nv44_graph_class(dev) ? 0x4980/4 : 0x4a40/4;
- }
-
- cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
- cp_out(ctx, nv44_graph_class(dev) ? 0x800029 : 0x800041);
-
- offset = ctx->ctxvals_pos;
- ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));
-
- if (ctx->mode != NOUVEAU_GRCTX_VALS)
- return;
-
- offset += 0x0280/4;
- for (i = 0; i < 16; i++, offset += 2)
- nv_wo32(obj, offset * 4, 0x3f800000);
-
- for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
- for (i = 0; i < vs_nr_b0 * 6; i += 6)
- nv_wo32(obj, (offset + b0_offset + i) * 4, 0x00000001);
- for (i = 0; i < vs_nr_b1 * 4; i += 4)
- nv_wo32(obj, (offset + b1_offset + i) * 4, 0x3f800000);
- }
-}
-
-static void
-nv40_grctx_generate(struct nouveau_grctx *ctx)
-{
- /* decide whether we're loading/unloading the context */
- cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
- cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
-
- cp_name(ctx, cp_check_load);
- cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
- cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
- cp_bra (ctx, ALWAYS, TRUE, cp_exit);
-
- /* setup for context load */
- cp_name(ctx, cp_setup_auto_load);
- cp_wait(ctx, STATUS, IDLE);
- cp_out (ctx, CP_NEXT_TO_SWAP);
- cp_name(ctx, cp_setup_load);
- cp_wait(ctx, STATUS, IDLE);
- cp_set (ctx, SWAP_DIRECTION, LOAD);
- cp_out (ctx, 0x00910880); /* ?? */
- cp_out (ctx, 0x00901ffe); /* ?? */
- cp_out (ctx, 0x01940000); /* ?? */
- cp_lsr (ctx, 0x20);
- cp_out (ctx, 0x0060000b); /* ?? */
- cp_wait(ctx, UNK57, CLEAR);
- cp_out (ctx, 0x0060000c); /* ?? */
- cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
-
- /* setup for context save */
- cp_name(ctx, cp_setup_save);
- cp_set (ctx, SWAP_DIRECTION, SAVE);
-
- /* general PGRAPH state */
- cp_name(ctx, cp_swap_state);
- cp_pos (ctx, 0x00020/4);
- nv40_graph_construct_general(ctx);
- cp_wait(ctx, STATUS, IDLE);
-
- /* 3D state, block 1 */
- cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
- nv40_graph_construct_state3d(ctx);
- cp_wait(ctx, STATUS, IDLE);
-
- /* 3D state, block 2 */
- nv40_graph_construct_state3d_2(ctx);
-
- /* Some other block of "random" state */
- nv40_graph_construct_state3d_3(ctx);
-
- /* Per-vertex shader state */
- cp_pos (ctx, ctx->ctxvals_pos);
- nv40_graph_construct_shader(ctx);
-
- /* pre-exit state updates */
- cp_name(ctx, cp_prepare_exit);
- cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
- cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
- cp_out (ctx, CP_NEXT_TO_CURRENT);
-
- cp_name(ctx, cp_exit);
- cp_set (ctx, USER_SAVE, NOT_PENDING);
- cp_set (ctx, USER_LOAD, NOT_PENDING);
- cp_out (ctx, CP_END);
-}
-
-void
-nv40_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
-{
- nv40_grctx_generate(&(struct nouveau_grctx) {
- .dev = dev,
- .mode = NOUVEAU_GRCTX_VALS,
- .data = mem,
- });
-}
-
-void
-nv40_grctx_init(struct drm_device *dev, u32 *size)
-{
- u32 ctxprog[256], i;
- struct nouveau_grctx ctx = {
- .dev = dev,
- .mode = NOUVEAU_GRCTX_PROG,
- .data = ctxprog,
- .ctxprog_max = ARRAY_SIZE(ctxprog)
- };
-
- nv40_grctx_generate(&ctx);
-
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
- for (i = 0; i < ctx.ctxprog_len; i++)
- nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, ctxprog[i]);
- *size = ctx.ctxvals_pos * 4;
-}
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-int
-nv40_mc_init(struct drm_device *dev)
-{
- /* Power up everything; resetting each individual unit will
- * be done later if needed.
- */
- nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
-
- if (nv44_graph_class(dev)) {
- u32 tmp = nv_rd32(dev, NV04_PFB_FIFO_DATA);
- nv_wr32(dev, NV40_PMC_1700, tmp);
- nv_wr32(dev, NV40_PMC_1704, 0);
- nv_wr32(dev, NV40_PMC_1708, 0);
- nv_wr32(dev, NV40_PMC_170C, tmp);
- }
-
- return 0;
-}
-
-void
-nv40_mc_takedown(struct drm_device *dev)
-{
-}
#include "drmP.h"
#include "nouveau_drv.h"
-#include "nouveau_bios.h"
+#include <nouveau_bios.h>
#include "nouveau_pm.h"
#include "nouveau_hw.h"
-#include "nouveau_fifo.h"
+#include <engine/fifo.h>
#define min2(a,b) ((a) < (b) ? (a) : (b))
#include "nouveau_connector.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_software.h"
#include "drm_crtc_helper.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nv50_display.h"
static void
+++ /dev/null
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-#include "nouveau_fifo.h"
-
-struct nv50_fb_priv {
- struct page *r100c08_page;
- dma_addr_t r100c08;
-};
-
-static void
-nv50_fb_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nv50_fb_priv *priv = pfb->priv;
-
- if (drm_mm_initialized(&pfb->tag_heap))
- drm_mm_takedown(&pfb->tag_heap);
-
- if (priv->r100c08_page) {
- pci_unmap_page(dev->pdev, priv->r100c08, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c08_page);
- }
-
- kfree(priv);
- pfb->priv = NULL;
-}
-
-static int
-nv50_fb_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nv50_fb_priv *priv;
- u32 tagmem;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- pfb->priv = priv;
-
- priv->r100c08_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!priv->r100c08_page) {
- nv50_fb_destroy(dev);
- return -ENOMEM;
- }
-
- priv->r100c08 = pci_map_page(dev->pdev, priv->r100c08_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, priv->r100c08)) {
- nv50_fb_destroy(dev);
- return -EFAULT;
- }
-
- tagmem = nv_rd32(dev, 0x100320);
- NV_DEBUG(dev, "%d tags available\n", tagmem);
- ret = drm_mm_init(&pfb->tag_heap, 0, tagmem);
- if (ret) {
- nv50_fb_destroy(dev);
- return ret;
- }
-
- return 0;
-}
-
-int
-nv50_fb_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fb_priv *priv;
- int ret;
-
- if (!dev_priv->engine.fb.priv) {
- ret = nv50_fb_create(dev);
- if (ret)
- return ret;
- }
- priv = dev_priv->engine.fb.priv;
-
- /* Not a clue what this is exactly. Without pointing it at a
- * scratch page, VRAM->GART blits with M2MF (as in DDX DFS)
- * cause IOMMU "read from address 0" errors (rh#561267)
- */
- nv_wr32(dev, 0x100c08, priv->r100c08 >> 8);
-
- /* This is needed to get meaningful information from 100c90
- * on traps. No idea what these values mean exactly. */
- switch (dev_priv->chipset) {
- case 0x50:
- nv_wr32(dev, 0x100c90, 0x000707ff);
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- nv_wr32(dev, 0x100c90, 0x000d0fff);
- break;
- case 0xaf:
- nv_wr32(dev, 0x100c90, 0x089d1fff);
- break;
- default:
- nv_wr32(dev, 0x100c90, 0x001d07ff);
- break;
- }
-
- return 0;
-}
-
-void
-nv50_fb_takedown(struct drm_device *dev)
-{
- nv50_fb_destroy(dev);
-}
-
-static struct nouveau_enum vm_dispatch_subclients[] = {
- { 0x00000000, "GRCTX", NULL },
- { 0x00000001, "NOTIFY", NULL },
- { 0x00000002, "QUERY", NULL },
- { 0x00000003, "COND", NULL },
- { 0x00000004, "M2M_IN", NULL },
- { 0x00000005, "M2M_OUT", NULL },
- { 0x00000006, "M2M_NOTIFY", NULL },
- {}
-};
-
-static struct nouveau_enum vm_ccache_subclients[] = {
- { 0x00000000, "CB", NULL },
- { 0x00000001, "TIC", NULL },
- { 0x00000002, "TSC", NULL },
- {}
-};
-
-static struct nouveau_enum vm_prop_subclients[] = {
- { 0x00000000, "RT0", NULL },
- { 0x00000001, "RT1", NULL },
- { 0x00000002, "RT2", NULL },
- { 0x00000003, "RT3", NULL },
- { 0x00000004, "RT4", NULL },
- { 0x00000005, "RT5", NULL },
- { 0x00000006, "RT6", NULL },
- { 0x00000007, "RT7", NULL },
- { 0x00000008, "ZETA", NULL },
- { 0x00000009, "LOCAL", NULL },
- { 0x0000000a, "GLOBAL", NULL },
- { 0x0000000b, "STACK", NULL },
- { 0x0000000c, "DST2D", NULL },
- {}
-};
-
-static struct nouveau_enum vm_pfifo_subclients[] = {
- { 0x00000000, "PUSHBUF", NULL },
- { 0x00000001, "SEMAPHORE", NULL },
- {}
-};
-
-static struct nouveau_enum vm_bar_subclients[] = {
- { 0x00000000, "FB", NULL },
- { 0x00000001, "IN", NULL },
- {}
-};
-
-static struct nouveau_enum vm_client[] = {
- { 0x00000000, "STRMOUT", NULL },
- { 0x00000003, "DISPATCH", vm_dispatch_subclients },
- { 0x00000004, "PFIFO_WRITE", NULL },
- { 0x00000005, "CCACHE", vm_ccache_subclients },
- { 0x00000006, "PPPP", NULL },
- { 0x00000007, "CLIPID", NULL },
- { 0x00000008, "PFIFO_READ", NULL },
- { 0x00000009, "VFETCH", NULL },
- { 0x0000000a, "TEXTURE", NULL },
- { 0x0000000b, "PROP", vm_prop_subclients },
- { 0x0000000c, "PVP", NULL },
- { 0x0000000d, "PBSP", NULL },
- { 0x0000000e, "PCRYPT", NULL },
- { 0x0000000f, "PCOUNTER", NULL },
- { 0x00000011, "PDAEMON", NULL },
- {}
-};
-
-static struct nouveau_enum vm_engine[] = {
- { 0x00000000, "PGRAPH", NULL },
- { 0x00000001, "PVP", NULL },
- { 0x00000004, "PEEPHOLE", NULL },
- { 0x00000005, "PFIFO", vm_pfifo_subclients },
- { 0x00000006, "BAR", vm_bar_subclients },
- { 0x00000008, "PPPP", NULL },
- { 0x00000009, "PBSP", NULL },
- { 0x0000000a, "PCRYPT", NULL },
- { 0x0000000b, "PCOUNTER", NULL },
- { 0x0000000c, "SEMAPHORE_BG", NULL },
- { 0x0000000d, "PCOPY", NULL },
- { 0x0000000e, "PDAEMON", NULL },
- {}
-};
-
-static struct nouveau_enum vm_fault[] = {
- { 0x00000000, "PT_NOT_PRESENT", NULL },
- { 0x00000001, "PT_TOO_SHORT", NULL },
- { 0x00000002, "PAGE_NOT_PRESENT", NULL },
- { 0x00000003, "PAGE_SYSTEM_ONLY", NULL },
- { 0x00000004, "PAGE_READ_ONLY", NULL },
- { 0x00000006, "NULL_DMAOBJ", NULL },
- { 0x00000007, "WRONG_MEMTYPE", NULL },
- { 0x0000000b, "VRAM_LIMIT", NULL },
- { 0x0000000f, "DMAOBJ_LIMIT", NULL },
- {}
-};
-
-void
-nv50_fb_vm_trap(struct drm_device *dev, int display)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- const struct nouveau_enum *en, *cl;
- unsigned long flags;
- u32 trap[6], idx, chinst;
- u8 st0, st1, st2, st3;
- int i, ch;
-
- idx = nv_rd32(dev, 0x100c90);
- if (!(idx & 0x80000000))
- return;
- idx &= 0x00ffffff;
-
- for (i = 0; i < 6; i++) {
- nv_wr32(dev, 0x100c90, idx | i << 24);
- trap[i] = nv_rd32(dev, 0x100c94);
- }
- nv_wr32(dev, 0x100c90, idx | 0x80000000);
-
- if (!display)
- return;
-
- /* lookup channel id */
- chinst = (trap[2] << 16) | trap[1];
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (ch = 0; ch < pfifo->channels; ch++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[ch];
-
- if (!chan || !chan->ramin)
- continue;
-
- if (chinst == chan->ramin->vinst >> 12)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
-
- /* decode status bits into something more useful */
- if (dev_priv->chipset < 0xa3 ||
- dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
- st0 = (trap[0] & 0x0000000f) >> 0;
- st1 = (trap[0] & 0x000000f0) >> 4;
- st2 = (trap[0] & 0x00000f00) >> 8;
- st3 = (trap[0] & 0x0000f000) >> 12;
- } else {
- st0 = (trap[0] & 0x000000ff) >> 0;
- st1 = (trap[0] & 0x0000ff00) >> 8;
- st2 = (trap[0] & 0x00ff0000) >> 16;
- st3 = (trap[0] & 0xff000000) >> 24;
- }
-
- NV_INFO(dev, "VM: trapped %s at 0x%02x%04x%04x on ch %d [0x%08x] ",
- (trap[5] & 0x00000100) ? "read" : "write",
- trap[5] & 0xff, trap[4] & 0xffff, trap[3] & 0xffff, ch, chinst);
-
- en = nouveau_enum_find(vm_engine, st0);
- if (en)
- printk("%s/", en->name);
- else
- printk("%02x/", st0);
-
- cl = nouveau_enum_find(vm_client, st2);
- if (cl)
- printk("%s/", cl->name);
- else
- printk("%02x/", st2);
-
- if (cl && cl->data) cl = nouveau_enum_find(cl->data, st3);
- else if (en && en->data) cl = nouveau_enum_find(en->data, st3);
- else cl = NULL;
- if (cl)
- printk("%s", cl->name);
- else
- printk("%02x", st3);
-
- printk(" reason: ");
- en = nouveau_enum_find(vm_fault, st1);
- if (en)
- printk("%s\n", en->name);
- else
- printk("0x%08x\n", st1);
-}
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_fbcon.h"
-#include "nouveau_mm.h"
+#include <core/mm.h>
int
nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
+++ /dev/null
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-#include "nouveau_vm.h"
-
-struct nv50_fifo_priv {
- struct nouveau_fifo_priv base;
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
-};
-
-struct nv50_fifo_chan {
- struct nouveau_fifo_chan base;
-};
-
-void
-nv50_fifo_playlist_update(struct drm_device *dev)
-{
- struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *cur;
- int i, p;
-
- cur = priv->playlist[priv->cur_playlist];
- priv->cur_playlist = !priv->cur_playlist;
-
- for (i = 0, p = 0; i < priv->base.channels; i++) {
- if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
- nv_wo32(cur, p++ * 4, i);
- }
-
- dev_priv->engine.instmem.flush(dev);
-
- nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
- nv_wr32(dev, 0x0032ec, p);
- nv_wr32(dev, 0x002500, 0x00000101);
-}
-
-static int
-nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
- struct nv50_fifo_chan *fctx;
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
- u64 instance = chan->ramin->vinst >> 12;
- unsigned long flags;
- int ret = 0, i;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
- atomic_inc(&chan->vm->engref[engine]);
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- for (i = 0; i < 0x100; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
- nv_wo32(chan->ramin, 0x3c, 0x403f6078);
- nv_wo32(chan->ramin, 0x40, 0x00000000);
- nv_wo32(chan->ramin, 0x44, 0x01003fff);
- nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
- nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
- nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
- drm_order(chan->dma.ib_max + 1) << 16);
- nv_wo32(chan->ramin, 0x60, 0x7fffffff);
- nv_wo32(chan->ramin, 0x78, 0x00000000);
- nv_wo32(chan->ramin, 0x7c, 0x30000001);
- nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->cinst >> 4));
-
- dev_priv->engine.instmem.flush(dev);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
- nv50_fifo_playlist_update(dev);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-static bool
-nv50_fifo_kickoff(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
- bool done = true;
- u32 me;
-
- /* HW bug workaround:
- *
- * PFIFO will hang forever if the connected engines don't report
- * that they've processed the context switch request.
- *
- * In order for the kickoff to work, we need to ensure all the
- * connected engines are in a state where they can answer.
- *
- * Newer chipsets don't seem to suffer from this issue, and there's
- * also an "ignore these engines" bitmask reg we can use if we hit
- * the issue there.
- */
-
- /* PME: make sure engine is enabled */
- me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);
-
- /* do the kickoff... */
- nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
- if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
- NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
- done = false;
- }
-
- /* restore any engine states we changed, and exit */
- nv_wr32(dev, 0x00b860, me);
- return done;
-}
-
-static void
-nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv50_fifo_chan *fctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- /* remove channel from playlist, will context switch if active */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
- nv50_fifo_playlist_update(dev);
-
- /* tell any engines on this channel to unload their contexts */
- nv50_fifo_kickoff(chan);
-
- nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* clean up */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
-
- atomic_dec(&chan->vm->engref[engine]);
- chan->engctx[engine] = NULL;
- kfree(fctx);
-}
-
-static int
-nv50_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 instance;
- int i;
-
- nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(dev, 0x00250c, 0x6f3cfc34);
- nv_wr32(dev, 0x002044, 0x01003fff);
-
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
-
- for (i = 0; i < 128; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (chan && chan->engctx[engine])
- instance = 0x80000000 | chan->ramin->vinst >> 12;
- else
- instance = 0x00000000;
- nv_wr32(dev, 0x002600 + (i * 4), instance);
- }
-
- nv50_fifo_playlist_update(dev);
-
- nv_wr32(dev, 0x003200, 1);
- nv_wr32(dev, 0x003250, 1);
- nv_wr32(dev, 0x002500, 1);
- return 0;
-}
-
-static int
-nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv = nv_engine(dev, engine);
- int i;
-
- /* set playlist length to zero, fifo will unload context */
- nv_wr32(dev, 0x0032ec, 0);
-
- /* tell all connected engines to unload their contexts */
- for (i = 0; i < priv->base.channels; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (chan && !nv50_fifo_kickoff(chan))
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x002140, 0);
- return 0;
-}
-
-void
-nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 5);
-}
-
-void
-nv50_fifo_destroy(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 8);
-
- nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
- nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
-
- dev_priv->eng[engine] = NULL;
- kfree(priv);
-}
-
-int
-nv50_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_fifo_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nv50_fifo_destroy;
- priv->base.base.init = nv50_fifo_init;
- priv->base.base.fini = nv50_fifo_fini;
- priv->base.base.context_new = nv50_fifo_context_new;
- priv->base.base.context_del = nv50_fifo_context_del;
- priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
- priv->base.channels = 127;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
- if (ret)
- goto error;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
-error:
- if (ret)
- priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
- return ret;
-}
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/dmi.h>
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_hw.h"
-#include "nouveau_gpio.h"
-
-#include "nv50_display.h"
-
-static int
-nv50_gpio_location(int line, u32 *reg, u32 *shift)
-{
- const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
-
- if (line >= 32)
- return -EINVAL;
-
- *reg = nv50_gpio_reg[line >> 3];
- *shift = (line & 7) << 2;
- return 0;
-}
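Worked example (editorial aside, not part of the patch): each GPIO line occupies a 4-bit field, eight lines per register. For line 10, reg = nv50_gpio_reg[10 >> 3] = nv50_gpio_reg[1] = 0xe108 and shift = (10 & 7) << 2 = 8, so the line lives in bits 8..11 of 0xe108; nv50_gpio_drive() below masks 7 << shift to program that field and nv50_gpio_sense() tests 4 << shift within the same field.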
-
-int
-nv50_gpio_drive(struct drm_device *dev, int line, int dir, int out)
-{
- u32 reg, shift;
-
- if (nv50_gpio_location(line, &reg, &shift))
- return -EINVAL;
-
- nv_mask(dev, reg, 7 << shift, (((dir ^ 1) << 1) | out) << shift);
- return 0;
-}
-
-int
-nv50_gpio_sense(struct drm_device *dev, int line)
-{
- u32 reg, shift;
-
- if (nv50_gpio_location(line, &reg, &shift))
- return -EINVAL;
-
- return !!(nv_rd32(dev, reg) & (4 << shift));
-}
-
-void
-nv50_gpio_irq_enable(struct drm_device *dev, int line, bool on)
-{
- u32 reg = line < 16 ? 0xe050 : 0xe070;
- u32 mask = 0x00010001 << (line & 0xf);
-
- nv_wr32(dev, reg + 4, mask);
- nv_mask(dev, reg + 0, mask, on ? mask : 0);
-}
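Worked example (editorial aside, not part of the patch): for line 5 the mask is 0x00010001 << 5 = 0x00200020, i.e. one bit in each 16-bit half of the register. The write to reg + 4 acks anything already pending on that line, and the nv_mask() on reg + 0 then enables or disables it; nv50_gpio_isr() below reads the same registers and splits the low and high halves into the "hi" and "lo" words it hands to nouveau_gpio_isr().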
-
-int
-nvd0_gpio_drive(struct drm_device *dev, int line, int dir, int out)
-{
- u32 data = ((dir ^ 1) << 13) | (out << 12);
- nv_mask(dev, 0x00d610 + (line * 4), 0x00003000, data);
- nv_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
- return 0;
-}
-
-int
-nvd0_gpio_sense(struct drm_device *dev, int line)
-{
- return !!(nv_rd32(dev, 0x00d610 + (line * 4)) & 0x00004000);
-}
-
-static void
-nv50_gpio_isr(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 intr0, intr1 = 0;
- u32 hi, lo;
-
- intr0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
- if (dev_priv->chipset >= 0x90)
- intr1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
-
- hi = (intr0 & 0x0000ffff) | (intr1 << 16);
- lo = (intr0 >> 16) | (intr1 & 0xffff0000);
- nouveau_gpio_isr(dev, 0, hi | lo);
-
- nv_wr32(dev, 0xe054, intr0);
- if (dev_priv->chipset >= 0x90)
- nv_wr32(dev, 0xe074, intr1);
-}
-
-static struct dmi_system_id gpio_reset_ids[] = {
- {
- .ident = "Apple Macbook 10,1",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro10,1"),
- }
- },
- { }
-};
-
-int
-nv50_gpio_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- /* initialise gpios and routing to vbios defaults */
- if (dmi_check_system(gpio_reset_ids))
- nouveau_gpio_reset(dev);
-
- /* disable, and ack any pending gpio interrupts */
- nv_wr32(dev, 0xe050, 0x00000000);
- nv_wr32(dev, 0xe054, 0xffffffff);
- if (dev_priv->chipset >= 0x90) {
- nv_wr32(dev, 0xe070, 0x00000000);
- nv_wr32(dev, 0xe074, 0xffffffff);
- }
-
- nouveau_irq_register(dev, 21, nv50_gpio_isr);
- return 0;
-}
-
-void
-nv50_gpio_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- nv_wr32(dev, 0xe050, 0x00000000);
- if (dev_priv->chipset >= 0x90)
- nv_wr32(dev, 0xe070, 0x00000000);
- nouveau_irq_unregister(dev, 21);
-}
+++ /dev/null
-/*
- * Copyright (C) 2007 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-#include "nouveau_dma.h"
-#include "nouveau_vm.h"
-#include "nv50_evo.h"
-
-struct nv50_graph_engine {
- struct nouveau_exec_engine base;
- u32 ctxprog[512];
- u32 ctxprog_size;
- u32 grctx_size;
-};
-
-static int
-nv50_graph_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
- u32 units = nv_rd32(dev, 0x001540);
- int i;
-
- NV_DEBUG(dev, "\n");
-
- /* master reset */
- nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
- nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
-
- /* reset/enable traps and interrupts */
- nv_wr32(dev, 0x400804, 0xc0000000);
- nv_wr32(dev, 0x406800, 0xc0000000);
- nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x401800, 0xc0000000);
- nv_wr32(dev, 0x405018, 0xc0000000);
- nv_wr32(dev, 0x402000, 0xc0000000);
- for (i = 0; i < 16; i++) {
- if (!(units & (1 << i)))
- continue;
-
- if (dev_priv->chipset < 0xa0) {
- nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
- nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
- nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
- } else {
- nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
- nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
- nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
- }
- }
-
- nv_wr32(dev, 0x400108, 0xffffffff);
- nv_wr32(dev, 0x400138, 0xffffffff);
- nv_wr32(dev, 0x400100, 0xffffffff);
- nv_wr32(dev, 0x40013c, 0xffffffff);
- nv_wr32(dev, 0x400500, 0x00010001);
-
- /* upload context program, initialise ctxctl defaults */
- nv_wr32(dev, 0x400324, 0x00000000);
- for (i = 0; i < pgraph->ctxprog_size; i++)
- nv_wr32(dev, 0x400328, pgraph->ctxprog[i]);
- nv_wr32(dev, 0x400824, 0x00000000);
- nv_wr32(dev, 0x400828, 0x00000000);
- nv_wr32(dev, 0x40082c, 0x00000000);
- nv_wr32(dev, 0x400830, 0x00000000);
- nv_wr32(dev, 0x400724, 0x00000000);
- nv_wr32(dev, 0x40032c, 0x00000000);
- nv_wr32(dev, 0x400320, 4); /* CTXCTL_CMD = NEWCTXDMA */
-
- /* some unknown zcull magic */
- switch (dev_priv->chipset & 0xf0) {
- case 0x50:
- case 0x80:
- case 0x90:
- nv_wr32(dev, 0x402ca8, 0x00000800);
- break;
- case 0xa0:
- default:
- nv_wr32(dev, 0x402cc0, 0x00000000);
- if (dev_priv->chipset == 0xa0 ||
- dev_priv->chipset == 0xaa ||
- dev_priv->chipset == 0xac) {
- nv_wr32(dev, 0x402ca8, 0x00000802);
- } else {
- nv_wr32(dev, 0x402cc0, 0x00000000);
- nv_wr32(dev, 0x402ca8, 0x00000002);
- }
-
- break;
- }
-
- /* zero out zcull regions */
- for (i = 0; i < 8; i++) {
- nv_wr32(dev, 0x402c20 + (i * 8), 0x00000000);
- nv_wr32(dev, 0x402c24 + (i * 8), 0x00000000);
- nv_wr32(dev, 0x402c28 + (i * 8), 0x00000000);
- nv_wr32(dev, 0x402c2c + (i * 8), 0x00000000);
- }
-
- return 0;
-}
-
-static int
-nv50_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_wr32(dev, 0x40013c, 0x00000000);
- return 0;
-}
-
-static int
-nv50_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramin = chan->ramin;
- struct nouveau_gpuobj *grctx = NULL;
- struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
- int hdr, ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- ret = nouveau_gpuobj_new(dev, NULL, pgraph->grctx_size, 0,
- NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &grctx);
- if (ret)
- return ret;
-
- hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
- nv_wo32(ramin, hdr + 0x00, 0x00190002);
- nv_wo32(ramin, hdr + 0x04, grctx->vinst + grctx->size - 1);
- nv_wo32(ramin, hdr + 0x08, grctx->vinst);
- nv_wo32(ramin, hdr + 0x0c, 0);
- nv_wo32(ramin, hdr + 0x10, 0);
- nv_wo32(ramin, hdr + 0x14, 0x00010000);
-
- nv50_grctx_fill(dev, grctx);
- nv_wo32(grctx, 0x00000, chan->ramin->vinst >> 12);
-
- dev_priv->engine.instmem.flush(dev);
-
- atomic_inc(&chan->vm->engref[NVOBJ_ENGINE_GR]);
- chan->engctx[NVOBJ_ENGINE_GR] = grctx;
- return 0;
-}
-
-static void
-nv50_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *grctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
-
- for (i = hdr; i < hdr + 24; i += 4)
- nv_wo32(chan->ramin, i, 0);
- dev_priv->engine.instmem.flush(dev);
-
- atomic_dec(&chan->vm->engref[engine]);
- nouveau_gpuobj_ref(NULL, &grctx);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nv50_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
- if (ret)
- return ret;
- obj->engine = 1;
- obj->class = class;
-
- nv_wo32(obj, 0x00, class);
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
- dev_priv->engine.instmem.flush(dev);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static void
-nv50_graph_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 0);
-}
-
-static void
-nv84_graph_tlb_flush(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
- bool idle, timeout = false;
- unsigned long flags;
- u64 start;
- u32 tmp;
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x400500, 0x00000001, 0x00000000);
-
- start = ptimer->read(dev);
- do {
- idle = true;
-
- for (tmp = nv_rd32(dev, 0x400380); tmp && idle; tmp >>= 3) {
- if ((tmp & 7) == 1)
- idle = false;
- }
-
- for (tmp = nv_rd32(dev, 0x400384); tmp && idle; tmp >>= 3) {
- if ((tmp & 7) == 1)
- idle = false;
- }
-
- for (tmp = nv_rd32(dev, 0x400388); tmp && idle; tmp >>= 3) {
- if ((tmp & 7) == 1)
- idle = false;
- }
- } while (!idle && !(timeout = ptimer->read(dev) - start > 2000000000));
-
- if (timeout) {
- NV_ERROR(dev, "PGRAPH TLB flush idle timeout fail: "
- "0x%08x 0x%08x 0x%08x 0x%08x\n",
- nv_rd32(dev, 0x400700), nv_rd32(dev, 0x400380),
- nv_rd32(dev, 0x400384), nv_rd32(dev, 0x400388));
- }
-
- nv50_vm_flush_engine(dev, 0);
-
- nv_mask(dev, 0x400500, 0x00000001, 0x00000001);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-}
-
-static struct nouveau_enum nv50_mp_exec_error_names[] = {
- { 3, "STACK_UNDERFLOW", NULL },
- { 4, "QUADON_ACTIVE", NULL },
- { 8, "TIMEOUT", NULL },
- { 0x10, "INVALID_OPCODE", NULL },
- { 0x40, "BREAKPOINT", NULL },
- {}
-};
-
-static struct nouveau_bitfield nv50_graph_trap_m2mf[] = {
- { 0x00000001, "NOTIFY" },
- { 0x00000002, "IN" },
- { 0x00000004, "OUT" },
- {}
-};
-
-static struct nouveau_bitfield nv50_graph_trap_vfetch[] = {
- { 0x00000001, "FAULT" },
- {}
-};
-
-static struct nouveau_bitfield nv50_graph_trap_strmout[] = {
- { 0x00000001, "FAULT" },
- {}
-};
-
-static struct nouveau_bitfield nv50_graph_trap_ccache[] = {
- { 0x00000001, "FAULT" },
- {}
-};
-
-/* There must be a *lot* of these. Will take some time to gather them up. */
-struct nouveau_enum nv50_data_error_names[] = {
- { 0x00000003, "INVALID_OPERATION", NULL },
- { 0x00000004, "INVALID_VALUE", NULL },
- { 0x00000005, "INVALID_ENUM", NULL },
- { 0x00000008, "INVALID_OBJECT", NULL },
- { 0x00000009, "READ_ONLY_OBJECT", NULL },
- { 0x0000000a, "SUPERVISOR_OBJECT", NULL },
- { 0x0000000b, "INVALID_ADDRESS_ALIGNMENT", NULL },
- { 0x0000000c, "INVALID_BITFIELD", NULL },
- { 0x0000000d, "BEGIN_END_ACTIVE", NULL },
- { 0x0000000e, "SEMANTIC_COLOR_BACK_OVER_LIMIT", NULL },
- { 0x0000000f, "VIEWPORT_ID_NEEDS_GP", NULL },
- { 0x00000010, "RT_DOUBLE_BIND", NULL },
- { 0x00000011, "RT_TYPES_MISMATCH", NULL },
- { 0x00000012, "RT_LINEAR_WITH_ZETA", NULL },
- { 0x00000015, "FP_TOO_FEW_REGS", NULL },
- { 0x00000016, "ZETA_FORMAT_CSAA_MISMATCH", NULL },
- { 0x00000017, "RT_LINEAR_WITH_MSAA", NULL },
- { 0x00000018, "FP_INTERPOLANT_START_OVER_LIMIT", NULL },
- { 0x00000019, "SEMANTIC_LAYER_OVER_LIMIT", NULL },
- { 0x0000001a, "RT_INVALID_ALIGNMENT", NULL },
- { 0x0000001b, "SAMPLER_OVER_LIMIT", NULL },
- { 0x0000001c, "TEXTURE_OVER_LIMIT", NULL },
- { 0x0000001e, "GP_TOO_MANY_OUTPUTS", NULL },
- { 0x0000001f, "RT_BPP128_WITH_MS8", NULL },
- { 0x00000021, "Z_OUT_OF_BOUNDS", NULL },
- { 0x00000023, "XY_OUT_OF_BOUNDS", NULL },
- { 0x00000024, "VP_ZERO_INPUTS", NULL },
- { 0x00000027, "CP_MORE_PARAMS_THAN_SHARED", NULL },
- { 0x00000028, "CP_NO_REG_SPACE_STRIPED", NULL },
- { 0x00000029, "CP_NO_REG_SPACE_PACKED", NULL },
- { 0x0000002a, "CP_NOT_ENOUGH_WARPS", NULL },
- { 0x0000002b, "CP_BLOCK_SIZE_MISMATCH", NULL },
- { 0x0000002c, "CP_NOT_ENOUGH_LOCAL_WARPS", NULL },
- { 0x0000002d, "CP_NOT_ENOUGH_STACK_WARPS", NULL },
- { 0x0000002e, "CP_NO_BLOCKDIM_LATCH", NULL },
- { 0x00000031, "ENG2D_FORMAT_MISMATCH", NULL },
- { 0x0000003f, "PRIMITIVE_ID_NEEDS_GP", NULL },
- { 0x00000044, "SEMANTIC_VIEWPORT_OVER_LIMIT", NULL },
- { 0x00000045, "SEMANTIC_COLOR_FRONT_OVER_LIMIT", NULL },
- { 0x00000046, "LAYER_ID_NEEDS_GP", NULL },
- { 0x00000047, "SEMANTIC_CLIP_OVER_LIMIT", NULL },
- { 0x00000048, "SEMANTIC_PTSZ_OVER_LIMIT", NULL },
- {}
-};
-
-static struct nouveau_bitfield nv50_graph_intr[] = {
- { 0x00000001, "NOTIFY" },
- { 0x00000002, "COMPUTE_QUERY" },
- { 0x00000010, "ILLEGAL_MTHD" },
- { 0x00000020, "ILLEGAL_CLASS" },
- { 0x00000040, "DOUBLE_NOTIFY" },
- { 0x00001000, "CONTEXT_SWITCH" },
- { 0x00010000, "BUFFER_NOTIFY" },
- { 0x00100000, "DATA_ERROR" },
- { 0x00200000, "TRAP" },
- { 0x01000000, "SINGLE_STEP" },
- {}
-};
-
-static void
-nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- uint32_t units = nv_rd32(dev, 0x1540);
- uint32_t addr, mp10, status, pc, oplow, ophigh;
- int i;
- int mps = 0;
- for (i = 0; i < 4; i++) {
- if (!(units & 1 << (i+24)))
- continue;
- if (dev_priv->chipset < 0xa0)
- addr = 0x408200 + (tpid << 12) + (i << 7);
- else
- addr = 0x408100 + (tpid << 11) + (i << 7);
- mp10 = nv_rd32(dev, addr + 0x10);
- status = nv_rd32(dev, addr + 0x14);
- if (!status)
- continue;
- if (display) {
- nv_rd32(dev, addr + 0x20);
- pc = nv_rd32(dev, addr + 0x24);
- oplow = nv_rd32(dev, addr + 0x70);
- ophigh = nv_rd32(dev, addr + 0x74);
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
- "TP %d MP %d: ", tpid, i);
- nouveau_enum_print(nv50_mp_exec_error_names, status);
- printk(" at %06x warp %d, opcode %08x %08x\n",
- pc&0xffffff, pc >> 24,
- oplow, ophigh);
- }
- nv_wr32(dev, addr + 0x10, mp10);
- nv_wr32(dev, addr + 0x14, 0);
- mps++;
- }
- if (!mps && display)
- NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
- "No MPs claiming errors?\n", tpid);
-}
-
-static void
-nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
- uint32_t ustatus_new, int display, const char *name)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int tps = 0;
- uint32_t units = nv_rd32(dev, 0x1540);
- int i, r;
- uint32_t ustatus_addr, ustatus;
- for (i = 0; i < 16; i++) {
- if (!(units & (1 << i)))
- continue;
- if (dev_priv->chipset < 0xa0)
- ustatus_addr = ustatus_old + (i << 12);
- else
- ustatus_addr = ustatus_new + (i << 11);
- ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
- if (!ustatus)
- continue;
- tps++;
- switch (type) {
- case 6: /* texture error... unknown for now */
- if (display) {
- NV_ERROR(dev, "magic set %d:\n", i);
- for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- }
- break;
- case 7: /* MP error */
- if (ustatus & 0x04030000) {
- nv50_pgraph_mp_trap(dev, i, display);
- ustatus &= ~0x04030000;
- }
- break;
- case 8: /* TPDMA error */
- {
- uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
- uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
- uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
- uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
- uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
- uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
- uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
- /* 2d engine destination */
- if (ustatus & 0x00000010) {
- if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000010;
- }
- /* Render target */
- if (ustatus & 0x00000040) {
- if (display) {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
- i, e14, e10);
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000040;
- }
- /* CUDA memory: l[], g[] or stack. */
- if (ustatus & 0x00000080) {
- if (display) {
- if (e18 & 0x80000000) {
- /* g[] read fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 24) & 0x1f));
- e18 &= ~0x1f000000;
- } else if (e18 & 0xc) {
- /* g[] write fault? */
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
- i, e14, e10 | ((e18 >> 7) & 0x1f));
- e18 &= ~0x00000f80;
- } else {
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
- i, e14, e10);
- }
- NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
- i, e0c, e18, e1c, e20, e24);
- }
- ustatus &= ~0x00000080;
- }
- }
- break;
- }
- if (ustatus) {
- if (display)
- NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
- }
- nv_wr32(dev, ustatus_addr, 0xc0000000);
- }
-
- if (!tps && display)
- NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
-}
-
-static int
-nv50_pgraph_trap_handler(struct drm_device *dev, u32 display, u64 inst, u32 chid)
-{
- u32 status = nv_rd32(dev, 0x400108);
- u32 ustatus;
-
- if (!status && display) {
- NV_INFO(dev, "PGRAPH - TRAP: no units reporting traps?\n");
- return 1;
- }
-
- /* DISPATCH: Relays commands to other units and handles NOTIFY,
- * COND, QUERY. If you get a trap from it, the command is still stuck
- * in DISPATCH and you need to do something about it. */
- if (status & 0x001) {
- ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
- if (!ustatus && display) {
- NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
- }
-
- nv_wr32(dev, 0x400500, 0x00000000);
-
- /* Known to be triggered by screwed up NOTIFY and COND... */
- if (ustatus & 0x00000001) {
- u32 addr = nv_rd32(dev, 0x400808);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 datal = nv_rd32(dev, 0x40080c);
- u32 datah = nv_rd32(dev, 0x400810);
- u32 class = nv_rd32(dev, 0x400814);
- u32 r848 = nv_rd32(dev, 0x400848);
-
- NV_INFO(dev, "PGRAPH - TRAP DISPATCH_FAULT\n");
- if (display && (addr & 0x80000000)) {
- NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x%08x "
- "400808 0x%08x 400848 0x%08x\n",
- chid, inst, subc, class, mthd, datah,
- datal, addr, r848);
- } else
- if (display) {
- NV_INFO(dev, "PGRAPH - no stuck command?\n");
- }
-
- nv_wr32(dev, 0x400808, 0);
- nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
- nv_wr32(dev, 0x400848, 0);
- ustatus &= ~0x00000001;
- }
-
- if (ustatus & 0x00000002) {
- u32 addr = nv_rd32(dev, 0x40084c);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, 0x40085c);
- u32 class = nv_rd32(dev, 0x400814);
-
- NV_INFO(dev, "PGRAPH - TRAP DISPATCH_QUERY\n");
- if (display && (addr & 0x80000000)) {
- NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x 40084c 0x%08x\n",
- chid, inst, subc, class, mthd,
- data, addr);
- } else
- if (display) {
- NV_INFO(dev, "PGRAPH - no stuck command?\n");
- }
-
- nv_wr32(dev, 0x40084c, 0);
- ustatus &= ~0x00000002;
- }
-
- if (ustatus && display) {
- NV_INFO(dev, "PGRAPH - TRAP_DISPATCH (unknown "
- "0x%08x)\n", ustatus);
- }
-
- nv_wr32(dev, 0x400804, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x001);
- status &= ~0x001;
- if (!status)
- return 0;
- }
-
- /* M2MF: Memory to memory copy engine. */
- if (status & 0x002) {
- u32 ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
- if (display) {
- NV_INFO(dev, "PGRAPH - TRAP_M2MF");
- nouveau_bitfield_print(nv50_graph_trap_m2mf, ustatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - TRAP_M2MF %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x406804), nv_rd32(dev, 0x406808),
- nv_rd32(dev, 0x40680c), nv_rd32(dev, 0x406810));
-
- }
-
- /* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 2);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x406800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x002);
- status &= ~0x002;
- }
-
- /* VFETCH: Fetches data from vertex buffers. */
- if (status & 0x004) {
- u32 ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
- if (display) {
- NV_INFO(dev, "PGRAPH - TRAP_VFETCH");
- nouveau_bitfield_print(nv50_graph_trap_vfetch, ustatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - TRAP_VFETCH %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x400c00), nv_rd32(dev, 0x400c08),
- nv_rd32(dev, 0x400c0c), nv_rd32(dev, 0x400c10));
- }
-
- nv_wr32(dev, 0x400c04, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x004);
- status &= ~0x004;
- }
-
- /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
- if (status & 0x008) {
- ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
- if (display) {
- NV_INFO(dev, "PGRAPH - TRAP_STRMOUT");
- nouveau_bitfield_print(nv50_graph_trap_strmout, ustatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - TRAP_STRMOUT %08x %08x %08x %08x\n",
- nv_rd32(dev, 0x401804), nv_rd32(dev, 0x401808),
- nv_rd32(dev, 0x40180c), nv_rd32(dev, 0x401810));
-
- }
-
- /* No sane way found yet -- just reset the bugger. */
- nv_wr32(dev, 0x400040, 0x80);
- nv_wr32(dev, 0x400040, 0);
- nv_wr32(dev, 0x401800, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x008);
- status &= ~0x008;
- }
-
- /* CCACHE: Handles code and c[] caches and fills them. */
- if (status & 0x010) {
- ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
- if (display) {
- NV_INFO(dev, "PGRAPH - TRAP_CCACHE");
- nouveau_bitfield_print(nv50_graph_trap_ccache, ustatus);
- printk("\n");
- NV_INFO(dev, "PGRAPH - TRAP_CCACHE %08x %08x %08x %08x"
- " %08x %08x %08x\n",
- nv_rd32(dev, 0x405000), nv_rd32(dev, 0x405004),
- nv_rd32(dev, 0x405008), nv_rd32(dev, 0x40500c),
- nv_rd32(dev, 0x405010), nv_rd32(dev, 0x405014),
- nv_rd32(dev, 0x40501c));
-
- }
-
- nv_wr32(dev, 0x405018, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x010);
- status &= ~0x010;
- }
-
- /* Unknown, not seen yet... 0x402000 is the only trap status reg
- * remaining, so try to handle it anyway. Perhaps related to that
- * unknown DMA slot on tesla? */
- if (status & 0x20) {
- ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
- if (display)
- NV_INFO(dev, "PGRAPH - TRAP_UNKC04 0x%08x\n", ustatus);
- nv_wr32(dev, 0x402000, 0xc0000000);
- /* no status modification on purpose */
- }
-
- /* TEXTURE: CUDA texturing units */
- if (status & 0x040) {
- nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
- "PGRAPH - TRAP_TEXTURE");
- nv_wr32(dev, 0x400108, 0x040);
- status &= ~0x040;
- }
-
- /* MP: CUDA execution engines. */
- if (status & 0x080) {
- nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
- "PGRAPH - TRAP_MP");
- nv_wr32(dev, 0x400108, 0x080);
- status &= ~0x080;
- }
-
- /* TPDMA: Handles TP-initiated uncached memory accesses:
- * l[], g[], stack, 2d surfaces, render targets. */
- if (status & 0x100) {
- nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
- "PGRAPH - TRAP_TPDMA");
- nv_wr32(dev, 0x400108, 0x100);
- status &= ~0x100;
- }
-
- if (status) {
- if (display)
- NV_INFO(dev, "PGRAPH - TRAP: unknown 0x%08x\n", status);
- nv_wr32(dev, 0x400108, status);
- }
-
- return 1;
-}
-
-int
-nv50_graph_isr_chid(struct drm_device *dev, u64 inst)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->ramin)
- continue;
-
- if (inst == chan->ramin->vinst)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
-}
-
-static void
-nv50_graph_isr(struct drm_device *dev)
-{
- u32 stat;
-
- while ((stat = nv_rd32(dev, 0x400100))) {
- u64 inst = (u64)(nv_rd32(dev, 0x40032c) & 0x0fffffff) << 12;
- u32 chid = nv50_graph_isr_chid(dev, inst);
- u32 addr = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00001ffc);
- u32 data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
- u32 class = nv_rd32(dev, 0x400814);
- u32 show = stat;
-
- if (stat & 0x00000010) {
- if (!nouveau_gpuobj_mthd_call2(dev, chid, class,
- mthd, data))
- show &= ~0x00000010;
- }
-
- show = (show && nouveau_ratelimit()) ? show : 0;
-
- if (show & 0x00100000) {
- u32 ecode = nv_rd32(dev, 0x400110);
- NV_INFO(dev, "PGRAPH - DATA_ERROR ");
- nouveau_enum_print(nv50_data_error_names, ecode);
- printk("\n");
- }
-
- if (stat & 0x00200000) {
- if (!nv50_pgraph_trap_handler(dev, show, inst, chid))
- show &= ~0x00200000;
- }
-
- nv_wr32(dev, 0x400100, stat);
- nv_wr32(dev, 0x400500, 0x00010001);
-
- if (show) {
- NV_INFO(dev, "PGRAPH -");
- nouveau_bitfield_print(nv50_graph_intr, show);
- printk("\n");
- NV_INFO(dev, "PGRAPH - ch %d (0x%010llx) subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv50_fb_vm_trap(dev, 1);
- }
- }
-
- if (nv_rd32(dev, 0x400824) & (1 << 31))
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
-}
-
-static void
-nv50_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nv50_graph_engine *pgraph = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, GR);
-
- nouveau_irq_unregister(dev, 12);
- kfree(pgraph);
-}
-
-int
-nv50_graph_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_graph_engine *pgraph;
- int ret;
-
- pgraph = kzalloc(sizeof(*pgraph), GFP_KERNEL);
- if (!pgraph)
- return -ENOMEM;
-
- ret = nv50_grctx_init(dev, pgraph->ctxprog, ARRAY_SIZE(pgraph->ctxprog),
- &pgraph->ctxprog_size,
- &pgraph->grctx_size);
- if (ret) {
- NV_ERROR(dev, "PGRAPH: ctxprog build failed\n");
- kfree(pgraph);
- return 0;
- }
-
- pgraph->base.destroy = nv50_graph_destroy;
- pgraph->base.init = nv50_graph_init;
- pgraph->base.fini = nv50_graph_fini;
- pgraph->base.context_new = nv50_graph_context_new;
- pgraph->base.context_del = nv50_graph_context_del;
- pgraph->base.object_new = nv50_graph_object_new;
- if (dev_priv->chipset == 0x50 || dev_priv->chipset == 0xac)
- pgraph->base.tlb_flush = nv50_graph_tlb_flush;
- else
- pgraph->base.tlb_flush = nv84_graph_tlb_flush;
-
- nouveau_irq_register(dev, 12, nv50_graph_isr);
-
- NVOBJ_ENGINE_ADD(dev, GR, &pgraph->base);
- NVOBJ_CLASS(dev, 0x0030, GR); /* null */
- NVOBJ_CLASS(dev, 0x5039, GR); /* m2mf */
- NVOBJ_CLASS(dev, 0x502d, GR); /* 2d */
-
- /* tesla */
- if (dev_priv->chipset == 0x50)
- NVOBJ_CLASS(dev, 0x5097, GR); /* tesla (nv50) */
- else
- if (dev_priv->chipset < 0xa0)
- NVOBJ_CLASS(dev, 0x8297, GR); /* tesla (nv8x/nv9x) */
- else {
- switch (dev_priv->chipset) {
- case 0xa0:
- case 0xaa:
- case 0xac:
- NVOBJ_CLASS(dev, 0x8397, GR);
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- NVOBJ_CLASS(dev, 0x8597, GR);
- break;
- case 0xaf:
- NVOBJ_CLASS(dev, 0x8697, GR);
- break;
- }
- }
-
- /* compute */
- NVOBJ_CLASS(dev, 0x50c0, GR);
- if (dev_priv->chipset > 0xa0 &&
- dev_priv->chipset != 0xaa &&
- dev_priv->chipset != 0xac)
- NVOBJ_CLASS(dev, 0x85c0, GR);
-
- return 0;
-}
+++ /dev/null
-/*
- * Copyright 2009 Marcin Kościelnicki
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#define CP_FLAG_CLEAR 0
-#define CP_FLAG_SET 1
-#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
-#define CP_FLAG_SWAP_DIRECTION_LOAD 0
-#define CP_FLAG_SWAP_DIRECTION_SAVE 1
-#define CP_FLAG_UNK01 ((0 * 32) + 1)
-#define CP_FLAG_UNK01_CLEAR 0
-#define CP_FLAG_UNK01_SET 1
-#define CP_FLAG_UNK03 ((0 * 32) + 3)
-#define CP_FLAG_UNK03_CLEAR 0
-#define CP_FLAG_UNK03_SET 1
-#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
-#define CP_FLAG_USER_SAVE_NOT_PENDING 0
-#define CP_FLAG_USER_SAVE_PENDING 1
-#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
-#define CP_FLAG_USER_LOAD_NOT_PENDING 0
-#define CP_FLAG_USER_LOAD_PENDING 1
-#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
-#define CP_FLAG_UNK0B_CLEAR 0
-#define CP_FLAG_UNK0B_SET 1
-#define CP_FLAG_XFER_SWITCH ((0 * 32) + 0xe)
-#define CP_FLAG_XFER_SWITCH_DISABLE 0
-#define CP_FLAG_XFER_SWITCH_ENABLE 1
-#define CP_FLAG_STATE ((0 * 32) + 0x1c)
-#define CP_FLAG_STATE_STOPPED 0
-#define CP_FLAG_STATE_RUNNING 1
-#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
-#define CP_FLAG_UNK1D_CLEAR 0
-#define CP_FLAG_UNK1D_SET 1
-#define CP_FLAG_UNK20 ((1 * 32) + 0)
-#define CP_FLAG_UNK20_CLEAR 0
-#define CP_FLAG_UNK20_SET 1
-#define CP_FLAG_STATUS ((2 * 32) + 0)
-#define CP_FLAG_STATUS_BUSY 0
-#define CP_FLAG_STATUS_IDLE 1
-#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)
-#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
-#define CP_FLAG_AUTO_SAVE_PENDING 1
-#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
-#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
-#define CP_FLAG_AUTO_LOAD_PENDING 1
-#define CP_FLAG_NEWCTX ((2 * 32) + 10)
-#define CP_FLAG_NEWCTX_BUSY 0
-#define CP_FLAG_NEWCTX_DONE 1
-#define CP_FLAG_XFER ((2 * 32) + 11)
-#define CP_FLAG_XFER_IDLE 0
-#define CP_FLAG_XFER_BUSY 1
-#define CP_FLAG_ALWAYS ((2 * 32) + 13)
-#define CP_FLAG_ALWAYS_FALSE 0
-#define CP_FLAG_ALWAYS_TRUE 1
-#define CP_FLAG_INTR ((2 * 32) + 15)
-#define CP_FLAG_INTR_NOT_PENDING 0
-#define CP_FLAG_INTR_PENDING 1
-
-#define CP_CTX 0x00100000
-#define CP_CTX_COUNT 0x000f0000
-#define CP_CTX_COUNT_SHIFT 16
-#define CP_CTX_REG 0x00003fff
-#define CP_LOAD_SR 0x00200000
-#define CP_LOAD_SR_VALUE 0x000fffff
-#define CP_BRA 0x00400000
-#define CP_BRA_IP 0x0001ff00
-#define CP_BRA_IP_SHIFT 8
-#define CP_BRA_IF_CLEAR 0x00000080
-#define CP_BRA_FLAG 0x0000007f
-#define CP_WAIT 0x00500000
-#define CP_WAIT_SET 0x00000080
-#define CP_WAIT_FLAG 0x0000007f
-#define CP_SET 0x00700000
-#define CP_SET_1 0x00000080
-#define CP_SET_FLAG 0x0000007f
-#define CP_NEWCTX 0x00600004
-#define CP_NEXT_TO_SWAP 0x00600005
-#define CP_SET_CONTEXT_POINTER 0x00600006
-#define CP_SET_XFER_POINTER 0x00600007
-#define CP_ENABLE 0x00600009
-#define CP_END 0x0060000c
-#define CP_NEXT_TO_CURRENT 0x0060000d
-#define CP_DISABLE1 0x0090ffff
-#define CP_DISABLE2 0x0091ffff
-#define CP_XFER_1 0x008000ff
-#define CP_XFER_2 0x008800ff
-#define CP_SEEK_1 0x00c000ff
-#define CP_SEEK_2 0x00c800ff
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_grctx.h"
-
-#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
-#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
-
-/*
- * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
- * the GPU itself that does context-switching, but it needs a special
- * microcode to do it. And it's the driver's task to supply this microcode,
- * further known as ctxprog, as well as the initial context values, known
- * as ctxvals.
- *
- * Without ctxprog, you cannot switch contexts. Not even in software, since
- * the majority of context [xfer strands] isn't accessible directly. You're
- * stuck with a single channel, and you also suffer all the problems resulting
- * from missing ctxvals, since you cannot load them.
- *
- * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
- * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
- * since you don't have... some sort of needed setup.
- *
- * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
- * it's too much hassle to handle no-ctxprog as a special case.
- */
-
-/*
- * How ctxprogs work.
- *
- * The ctxprog is written in its own kind of microcode, with very small and
- * crappy set of available commands. You upload it to a small [512 insns]
- * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
- * switch channels, or when the driver explicitly requests it. Stuff visible
- * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
- * the per-channel context save area in VRAM [known as ctxvals or grctx],
- * 4 flags registers, a scratch register, two grctx pointers, plus many
- * random poorly-understood details.
- *
- * When ctxprog runs, it's supposed to check what operations are asked of it,
- * save old context if requested, optionally reset PGRAPH and switch to the
- * new channel, and load the new context. Context consists of three major
- * parts: a subset of MMIO registers and two "xfer areas".
- */
-
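To make the encoding above concrete: a conditional branch is just the CP_BRA opcode with the target, flag and polarity packed into the bitfields defined earlier. A minimal sketch follows, using a made-up helper name; the generator itself emits these words through the cp_bra()/cp_out() macros from nouveau_grctx.h.

/* Illustrative only: pack one ctxprog branch word from the CP_BRA fields.
 * example_cp_bra() is a hypothetical helper, not a driver function. */
static uint32_t
example_cp_bra(int target_ip, int flag, int branch_if_set)
{
	uint32_t insn = CP_BRA;                             /* opcode, 0x00400000 */
	insn |= (target_ip << CP_BRA_IP_SHIFT) & CP_BRA_IP; /* branch target (instruction index) */
	insn |= flag & CP_BRA_FLAG;                         /* which flag bit to test */
	if (!branch_if_set)
		insn |= CP_BRA_IF_CLEAR;                    /* branch when the flag is clear instead */
	return insn;
}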
-/* TODO:
- * - document unimplemented bits compared to nvidia
- * - NVAx: make a TP subroutine, use it.
- * - use 0x4008fc instead of 0x1540?
- */
-
-enum cp_label {
- cp_check_load = 1,
- cp_setup_auto_load,
- cp_setup_load,
- cp_setup_save,
- cp_swap_state,
- cp_prepare_exit,
- cp_exit,
-};
-
-static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
-
-/* Main function: construct the ctxprog skeleton, call the other functions. */
-
-static int
-nv50_grctx_generate(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
-
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0x98:
- case 0xa0:
- case 0xa3:
- case 0xa5:
- case 0xa8:
- case 0xaa:
- case 0xac:
- case 0xaf:
- break;
- default:
- NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
- "your NV%x card.\n", dev_priv->chipset);
- NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
- "the devs.\n");
- return -ENOSYS;
- }
-
- cp_set (ctx, STATE, RUNNING);
- cp_set (ctx, XFER_SWITCH, ENABLE);
- /* decide whether we're loading/unloading the context */
- cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
- cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
-
- cp_name(ctx, cp_check_load);
- cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
- cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
- cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit);
-
- /* setup for context load */
- cp_name(ctx, cp_setup_auto_load);
- cp_out (ctx, CP_DISABLE1);
- cp_out (ctx, CP_DISABLE2);
- cp_out (ctx, CP_ENABLE);
- cp_out (ctx, CP_NEXT_TO_SWAP);
- cp_set (ctx, UNK01, SET);
- cp_name(ctx, cp_setup_load);
- cp_out (ctx, CP_NEWCTX);
- cp_wait(ctx, NEWCTX, BUSY);
- cp_set (ctx, UNK1D, CLEAR);
- cp_set (ctx, SWAP_DIRECTION, LOAD);
- cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
- cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
-
- /* setup for context save */
- cp_name(ctx, cp_setup_save);
- cp_set (ctx, UNK1D, SET);
- cp_wait(ctx, STATUS, BUSY);
- cp_wait(ctx, INTR, PENDING);
- cp_bra (ctx, STATUS, BUSY, cp_setup_save);
- cp_set (ctx, UNK01, SET);
- cp_set (ctx, SWAP_DIRECTION, SAVE);
-
- /* general PGRAPH state */
- cp_name(ctx, cp_swap_state);
- cp_set (ctx, UNK03, SET);
- cp_pos (ctx, 0x00004/4);
- cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
- cp_pos (ctx, 0x00100/4);
- nv50_graph_construct_mmio(ctx);
- nv50_graph_construct_xfer1(ctx);
- nv50_graph_construct_xfer2(ctx);
-
- cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
-
- cp_set (ctx, UNK20, SET);
- cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
- cp_lsr (ctx, ctx->ctxvals_base);
- cp_out (ctx, CP_SET_XFER_POINTER);
- cp_lsr (ctx, 4);
- cp_out (ctx, CP_SEEK_1);
- cp_out (ctx, CP_XFER_1);
- cp_wait(ctx, XFER, BUSY);
-
- /* pre-exit state updates */
- cp_name(ctx, cp_prepare_exit);
- cp_set (ctx, UNK01, CLEAR);
- cp_set (ctx, UNK03, CLEAR);
- cp_set (ctx, UNK1D, CLEAR);
-
- cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
- cp_out (ctx, CP_NEXT_TO_CURRENT);
-
- cp_name(ctx, cp_exit);
- cp_set (ctx, USER_SAVE, NOT_PENDING);
- cp_set (ctx, USER_LOAD, NOT_PENDING);
- cp_set (ctx, XFER_SWITCH, DISABLE);
- cp_set (ctx, STATE, STOPPED);
- cp_out (ctx, CP_END);
- ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
-
- return 0;
-}
-
-void
-nv50_grctx_fill(struct drm_device *dev, struct nouveau_gpuobj *mem)
-{
- nv50_grctx_generate(&(struct nouveau_grctx) {
- .dev = dev,
- .mode = NOUVEAU_GRCTX_VALS,
- .data = mem,
- });
-}
-
-int
-nv50_grctx_init(struct drm_device *dev, u32 *data, u32 max, u32 *len, u32 *cnt)
-{
- struct nouveau_grctx ctx = {
- .dev = dev,
- .mode = NOUVEAU_GRCTX_PROG,
- .data = data,
- .ctxprog_max = max
- };
- int ret;
-
- ret = nv50_grctx_generate(&ctx);
- *cnt = ctx.ctxvals_pos * 4;
- *len = ctx.ctxprog_len;
- return ret;
-}
-
-/*
- * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
- * registers to save/restore and the default values for them.
- */
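As a reading aid for the long runs of cp_ctx()/gr_def() calls below: cp_ctx() appears to declare a run of consecutive MMIO registers for the ctxprog to save/restore, while gr_def() supplies a non-zero default for one register inside an already-declared run; registers with no gr_def() simply start out as 0 in the ctxvals image. A minimal sketch of the idiom, with placeholder register offsets:

/* Sketch of the declaration idiom used throughout this generator.
 * The offsets here are placeholders, not real PGRAPH registers. */
static void
example_mmio_chunk(struct nouveau_grctx *ctx)
{
	cp_ctx(ctx, 0x400000, 4);          /* swap four regs: 0x400000..0x40000c */
	gr_def(ctx, 0x400004, 0x00000001); /* default ctxval for 0x400004 */
	gr_def(ctx, 0x40000c, 0x00000100); /* default for 0x40000c; the rest stay 0 */
}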
-
-static void
-nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx);
-
-static void
-nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i, j;
- int offset, base;
- uint32_t units = nv_rd32 (ctx->dev, 0x1540);
-
- /* 0800: DISPATCH */
- cp_ctx(ctx, 0x400808, 7);
- gr_def(ctx, 0x400814, 0x00000030);
- cp_ctx(ctx, 0x400834, 0x32);
- if (dev_priv->chipset == 0x50) {
- gr_def(ctx, 0x400834, 0xff400040);
- gr_def(ctx, 0x400838, 0xfff00080);
- gr_def(ctx, 0x40083c, 0xfff70090);
- gr_def(ctx, 0x400840, 0xffe806a8);
- }
- gr_def(ctx, 0x400844, 0x00000002);
- if (IS_NVA3F(dev_priv->chipset))
- gr_def(ctx, 0x400894, 0x00001000);
- gr_def(ctx, 0x4008e8, 0x00000003);
- gr_def(ctx, 0x4008ec, 0x00001000);
- if (dev_priv->chipset == 0x50)
- cp_ctx(ctx, 0x400908, 0xb);
- else if (dev_priv->chipset < 0xa0)
- cp_ctx(ctx, 0x400908, 0xc);
- else
- cp_ctx(ctx, 0x400908, 0xe);
-
- if (dev_priv->chipset >= 0xa0)
- cp_ctx(ctx, 0x400b00, 0x1);
- if (IS_NVA3F(dev_priv->chipset)) {
- cp_ctx(ctx, 0x400b10, 0x1);
- gr_def(ctx, 0x400b10, 0x0001629d);
- cp_ctx(ctx, 0x400b20, 0x1);
- gr_def(ctx, 0x400b20, 0x0001629d);
- }
-
- nv50_graph_construct_mmio_ddata(ctx);
-
- /* 0C00: VFETCH */
- cp_ctx(ctx, 0x400c08, 0x2);
- gr_def(ctx, 0x400c08, 0x0000fe0c);
-
- /* 1000 */
- if (dev_priv->chipset < 0xa0) {
- cp_ctx(ctx, 0x401008, 0x4);
- gr_def(ctx, 0x401014, 0x00001000);
- } else if (!IS_NVA3F(dev_priv->chipset)) {
- cp_ctx(ctx, 0x401008, 0x5);
- gr_def(ctx, 0x401018, 0x00001000);
- } else {
- cp_ctx(ctx, 0x401008, 0x5);
- gr_def(ctx, 0x401018, 0x00004000);
- }
-
- /* 1400 */
- cp_ctx(ctx, 0x401400, 0x8);
- cp_ctx(ctx, 0x401424, 0x3);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, 0x40142c, 0x0001fd87);
- else
- gr_def(ctx, 0x40142c, 0x00000187);
- cp_ctx(ctx, 0x401540, 0x5);
- gr_def(ctx, 0x401550, 0x00001018);
-
- /* 1800: STREAMOUT */
- cp_ctx(ctx, 0x401814, 0x1);
- gr_def(ctx, 0x401814, 0x000000ff);
- if (dev_priv->chipset == 0x50) {
- cp_ctx(ctx, 0x40181c, 0xe);
- gr_def(ctx, 0x401850, 0x00000004);
- } else if (dev_priv->chipset < 0xa0) {
- cp_ctx(ctx, 0x40181c, 0xf);
- gr_def(ctx, 0x401854, 0x00000004);
- } else {
- cp_ctx(ctx, 0x40181c, 0x13);
- gr_def(ctx, 0x401864, 0x00000004);
- }
-
- /* 1C00 */
- cp_ctx(ctx, 0x401c00, 0x1);
- switch (dev_priv->chipset) {
- case 0x50:
- gr_def(ctx, 0x401c00, 0x0001005f);
- break;
- case 0x84:
- case 0x86:
- case 0x94:
- gr_def(ctx, 0x401c00, 0x044d00df);
- break;
- case 0x92:
- case 0x96:
- case 0x98:
- case 0xa0:
- case 0xaa:
- case 0xac:
- gr_def(ctx, 0x401c00, 0x042500df);
- break;
- case 0xa3:
- case 0xa5:
- case 0xa8:
- case 0xaf:
- gr_def(ctx, 0x401c00, 0x142500df);
- break;
- }
-
- /* 2000 */
-
- /* 2400 */
- cp_ctx(ctx, 0x402400, 0x1);
- if (dev_priv->chipset == 0x50)
- cp_ctx(ctx, 0x402408, 0x1);
- else
- cp_ctx(ctx, 0x402408, 0x2);
- gr_def(ctx, 0x402408, 0x00000600);
-
- /* 2800: CSCHED */
- cp_ctx(ctx, 0x402800, 0x1);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, 0x402800, 0x00000006);
-
- /* 2C00: ZCULL */
- cp_ctx(ctx, 0x402c08, 0x6);
- if (dev_priv->chipset != 0x50)
- gr_def(ctx, 0x402c14, 0x01000000);
- gr_def(ctx, 0x402c18, 0x000000ff);
- if (dev_priv->chipset == 0x50)
- cp_ctx(ctx, 0x402ca0, 0x1);
- else
- cp_ctx(ctx, 0x402ca0, 0x2);
- if (dev_priv->chipset < 0xa0)
- gr_def(ctx, 0x402ca0, 0x00000400);
- else if (!IS_NVA3F(dev_priv->chipset))
- gr_def(ctx, 0x402ca0, 0x00000800);
- else
- gr_def(ctx, 0x402ca0, 0x00000400);
- cp_ctx(ctx, 0x402cac, 0x4);
-
- /* 3000: ENG2D */
- cp_ctx(ctx, 0x403004, 0x1);
- gr_def(ctx, 0x403004, 0x00000001);
-
- /* 3400 */
- if (dev_priv->chipset >= 0xa0) {
- cp_ctx(ctx, 0x403404, 0x1);
- gr_def(ctx, 0x403404, 0x00000001);
- }
-
- /* 5000: CCACHE */
- cp_ctx(ctx, 0x405000, 0x1);
- switch (dev_priv->chipset) {
- case 0x50:
- gr_def(ctx, 0x405000, 0x00300080);
- break;
- case 0x84:
- case 0xa0:
- case 0xa3:
- case 0xa5:
- case 0xa8:
- case 0xaa:
- case 0xac:
- case 0xaf:
- gr_def(ctx, 0x405000, 0x000e0080);
- break;
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0x98:
- gr_def(ctx, 0x405000, 0x00000080);
- break;
- }
- cp_ctx(ctx, 0x405014, 0x1);
- gr_def(ctx, 0x405014, 0x00000004);
- cp_ctx(ctx, 0x40501c, 0x1);
- cp_ctx(ctx, 0x405024, 0x1);
- cp_ctx(ctx, 0x40502c, 0x1);
-
- /* 6000? */
- if (dev_priv->chipset == 0x50)
- cp_ctx(ctx, 0x4063e0, 0x1);
-
- /* 6800: M2MF */
- if (dev_priv->chipset < 0x90) {
- cp_ctx(ctx, 0x406814, 0x2b);
- gr_def(ctx, 0x406818, 0x00000f80);
- gr_def(ctx, 0x406860, 0x007f0080);
- gr_def(ctx, 0x40689c, 0x007f0080);
- } else {
- cp_ctx(ctx, 0x406814, 0x4);
- if (dev_priv->chipset == 0x98)
- gr_def(ctx, 0x406818, 0x00000f80);
- else
- gr_def(ctx, 0x406818, 0x00001f80);
- if (IS_NVA3F(dev_priv->chipset))
- gr_def(ctx, 0x40681c, 0x00000030);
- cp_ctx(ctx, 0x406830, 0x3);
- }
-
- /* 7000: per-ROP group state */
- for (i = 0; i < 8; i++) {
- if (units & (1<<(i+16))) {
- cp_ctx(ctx, 0x407000 + (i<<8), 3);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
- else if (dev_priv->chipset != 0xa5)
- gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
- else
- gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
- gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
-
- if (dev_priv->chipset == 0x50) {
- cp_ctx(ctx, 0x407010 + (i<<8), 1);
- } else if (dev_priv->chipset < 0xa0) {
- cp_ctx(ctx, 0x407010 + (i<<8), 2);
- gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
- gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
- } else {
- cp_ctx(ctx, 0x407010 + (i<<8), 3);
- gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
- if (dev_priv->chipset != 0xa5)
- gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
- else
- gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
- }
-
- cp_ctx(ctx, 0x407080 + (i<<8), 4);
- if (dev_priv->chipset != 0xa5)
- gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
- else
- gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
- else
- gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
- gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
-
- if (dev_priv->chipset < 0xa0)
- cp_ctx(ctx, 0x407094 + (i<<8), 1);
- else if (!IS_NVA3F(dev_priv->chipset))
- cp_ctx(ctx, 0x407094 + (i<<8), 3);
- else {
- cp_ctx(ctx, 0x407094 + (i<<8), 4);
- gr_def(ctx, 0x4070a0 + (i<<8), 1);
- }
- }
- }
-
- cp_ctx(ctx, 0x407c00, 0x3);
- if (dev_priv->chipset < 0x90)
- gr_def(ctx, 0x407c00, 0x00010040);
- else if (dev_priv->chipset < 0xa0)
- gr_def(ctx, 0x407c00, 0x00390040);
- else
- gr_def(ctx, 0x407c00, 0x003d0040);
- gr_def(ctx, 0x407c08, 0x00000022);
- if (dev_priv->chipset >= 0xa0) {
- cp_ctx(ctx, 0x407c10, 0x3);
- cp_ctx(ctx, 0x407c20, 0x1);
- cp_ctx(ctx, 0x407c2c, 0x1);
- }
-
- if (dev_priv->chipset < 0xa0) {
- cp_ctx(ctx, 0x407d00, 0x9);
- } else {
- cp_ctx(ctx, 0x407d00, 0x15);
- }
- if (dev_priv->chipset == 0x98)
- gr_def(ctx, 0x407d08, 0x00380040);
- else {
- if (dev_priv->chipset < 0x90)
- gr_def(ctx, 0x407d08, 0x00010040);
- else if (dev_priv->chipset < 0xa0)
- gr_def(ctx, 0x407d08, 0x00390040);
- else
- gr_def(ctx, 0x407d08, 0x003d0040);
- gr_def(ctx, 0x407d0c, 0x00000022);
- }
-
- /* 8000+: per-TP state */
- for (i = 0; i < 10; i++) {
- if (units & (1<<i)) {
- if (dev_priv->chipset < 0xa0)
- base = 0x408000 + (i<<12);
- else
- base = 0x408000 + (i<<11);
- if (dev_priv->chipset < 0xa0)
- offset = base + 0xc00;
- else
- offset = base + 0x80;
- cp_ctx(ctx, offset + 0x00, 1);
- gr_def(ctx, offset + 0x00, 0x0000ff0a);
- cp_ctx(ctx, offset + 0x08, 1);
-
- /* per-MP state */
- for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
- if (!(units & (1 << (j+24)))) continue;
- if (dev_priv->chipset < 0xa0)
- offset = base + 0x200 + (j<<7);
- else
- offset = base + 0x100 + (j<<7);
- cp_ctx(ctx, offset, 0x20);
- gr_def(ctx, offset + 0x00, 0x01800000);
- gr_def(ctx, offset + 0x04, 0x00160000);
- gr_def(ctx, offset + 0x08, 0x01800000);
- gr_def(ctx, offset + 0x18, 0x0003ffff);
- switch (dev_priv->chipset) {
- case 0x50:
- gr_def(ctx, offset + 0x1c, 0x00080000);
- break;
- case 0x84:
- gr_def(ctx, offset + 0x1c, 0x00880000);
- break;
- case 0x86:
- gr_def(ctx, offset + 0x1c, 0x018c0000);
- break;
- case 0x92:
- case 0x96:
- case 0x98:
- gr_def(ctx, offset + 0x1c, 0x118c0000);
- break;
- case 0x94:
- gr_def(ctx, offset + 0x1c, 0x10880000);
- break;
- case 0xa0:
- case 0xa5:
- gr_def(ctx, offset + 0x1c, 0x310c0000);
- break;
- case 0xa3:
- case 0xa8:
- case 0xaa:
- case 0xac:
- case 0xaf:
- gr_def(ctx, offset + 0x1c, 0x300c0000);
- break;
- }
- gr_def(ctx, offset + 0x40, 0x00010401);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, offset + 0x48, 0x00000040);
- else
- gr_def(ctx, offset + 0x48, 0x00000078);
- gr_def(ctx, offset + 0x50, 0x000000bf);
- gr_def(ctx, offset + 0x58, 0x00001210);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, offset + 0x5c, 0x00000080);
- else
- gr_def(ctx, offset + 0x5c, 0x08000080);
- if (dev_priv->chipset >= 0xa0)
- gr_def(ctx, offset + 0x68, 0x0000003e);
- }
-
- if (dev_priv->chipset < 0xa0)
- cp_ctx(ctx, base + 0x300, 0x4);
- else
- cp_ctx(ctx, base + 0x300, 0x5);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, base + 0x304, 0x00007070);
- else if (dev_priv->chipset < 0xa0)
- gr_def(ctx, base + 0x304, 0x00027070);
- else if (!IS_NVA3F(dev_priv->chipset))
- gr_def(ctx, base + 0x304, 0x01127070);
- else
- gr_def(ctx, base + 0x304, 0x05127070);
-
- if (dev_priv->chipset < 0xa0)
- cp_ctx(ctx, base + 0x318, 1);
- else
- cp_ctx(ctx, base + 0x320, 1);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, base + 0x318, 0x0003ffff);
- else if (dev_priv->chipset < 0xa0)
- gr_def(ctx, base + 0x318, 0x03ffffff);
- else
- gr_def(ctx, base + 0x320, 0x07ffffff);
-
- if (dev_priv->chipset < 0xa0)
- cp_ctx(ctx, base + 0x324, 5);
- else
- cp_ctx(ctx, base + 0x328, 4);
-
- if (dev_priv->chipset < 0xa0) {
- cp_ctx(ctx, base + 0x340, 9);
- offset = base + 0x340;
- } else if (!IS_NVA3F(dev_priv->chipset)) {
- cp_ctx(ctx, base + 0x33c, 0xb);
- offset = base + 0x344;
- } else {
- cp_ctx(ctx, base + 0x33c, 0xd);
- offset = base + 0x344;
- }
- gr_def(ctx, offset + 0x0, 0x00120407);
- gr_def(ctx, offset + 0x4, 0x05091507);
- if (dev_priv->chipset == 0x84)
- gr_def(ctx, offset + 0x8, 0x05100202);
- else
- gr_def(ctx, offset + 0x8, 0x05010202);
- gr_def(ctx, offset + 0xc, 0x00030201);
- if (dev_priv->chipset == 0xa3)
- cp_ctx(ctx, base + 0x36c, 1);
-
- cp_ctx(ctx, base + 0x400, 2);
- gr_def(ctx, base + 0x404, 0x00000040);
- cp_ctx(ctx, base + 0x40c, 2);
- gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
- gr_def(ctx, base + 0x410, 0x00141210);
-
- if (dev_priv->chipset < 0xa0)
- offset = base + 0x800;
- else
- offset = base + 0x500;
- cp_ctx(ctx, offset, 6);
- gr_def(ctx, offset + 0x0, 0x000001f0);
- gr_def(ctx, offset + 0x4, 0x00000001);
- gr_def(ctx, offset + 0x8, 0x00000003);
- if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
- gr_def(ctx, offset + 0xc, 0x00008000);
- gr_def(ctx, offset + 0x14, 0x00039e00);
- cp_ctx(ctx, offset + 0x1c, 2);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, offset + 0x1c, 0x00000040);
- else
- gr_def(ctx, offset + 0x1c, 0x00000100);
- gr_def(ctx, offset + 0x20, 0x00003800);
-
- if (dev_priv->chipset >= 0xa0) {
- cp_ctx(ctx, base + 0x54c, 2);
- if (!IS_NVA3F(dev_priv->chipset))
- gr_def(ctx, base + 0x54c, 0x003fe006);
- else
- gr_def(ctx, base + 0x54c, 0x003fe007);
- gr_def(ctx, base + 0x550, 0x003fe000);
- }
-
- if (dev_priv->chipset < 0xa0)
- offset = base + 0xa00;
- else
- offset = base + 0x680;
- cp_ctx(ctx, offset, 1);
- gr_def(ctx, offset, 0x00404040);
-
- if (dev_priv->chipset < 0xa0)
- offset = base + 0xe00;
- else
- offset = base + 0x700;
- cp_ctx(ctx, offset, 2);
- if (dev_priv->chipset < 0xa0)
- gr_def(ctx, offset, 0x0077f005);
- else if (dev_priv->chipset == 0xa5)
- gr_def(ctx, offset, 0x6cf7f007);
- else if (dev_priv->chipset == 0xa8)
- gr_def(ctx, offset, 0x6cfff007);
- else if (dev_priv->chipset == 0xac)
- gr_def(ctx, offset, 0x0cfff007);
- else
- gr_def(ctx, offset, 0x0cf7f007);
- if (dev_priv->chipset == 0x50)
- gr_def(ctx, offset + 0x4, 0x00007fff);
- else if (dev_priv->chipset < 0xa0)
- gr_def(ctx, offset + 0x4, 0x003f7fff);
- else
- gr_def(ctx, offset + 0x4, 0x02bf7fff);
- cp_ctx(ctx, offset + 0x2c, 1);
- if (dev_priv->chipset == 0x50) {
- cp_ctx(ctx, offset + 0x50, 9);
- gr_def(ctx, offset + 0x54, 0x000003ff);
- gr_def(ctx, offset + 0x58, 0x00000003);
- gr_def(ctx, offset + 0x5c, 0x00000003);
- gr_def(ctx, offset + 0x60, 0x000001ff);
- gr_def(ctx, offset + 0x64, 0x0000001f);
- gr_def(ctx, offset + 0x68, 0x0000000f);
- gr_def(ctx, offset + 0x6c, 0x0000000f);
- } else if (dev_priv->chipset < 0xa0) {
- cp_ctx(ctx, offset + 0x50, 1);
- cp_ctx(ctx, offset + 0x70, 1);
- } else {
- cp_ctx(ctx, offset + 0x50, 1);
- cp_ctx(ctx, offset + 0x60, 5);
- }
- }
- }
-}
-
-static void
-dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
- int i;
- if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
- for (i = 0; i < num; i++)
- nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
- ctx->ctxvals_pos += num;
-}
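Unlike xf_emit() further down, dd_emit() packs its values into consecutive ctxvals words; the whole block is then handed to the ctxprog as a single cp_ctx() range at the end of nv50_graph_construct_mmio_ddata(). A throwaway sketch of the resulting layout (hypothetical helper, not driver code):

/* Sketch: byte offset of the k-th dd_emit()'d value inside the grctx image,
 * where base is ctx->ctxvals_pos captured at the top of mmio_ddata(). */
static uint32_t
example_ddata_word_offset(uint32_t base, int k)
{
	return (base + k) * 4;	/* consecutive 32-bit words, no strand interleave */
}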
-
-static void
-nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int base, num;
- base = ctx->ctxvals_pos;
-
- /* tesla state */
- dd_emit(ctx, 1, 0); /* 00000001 UNK0F90 */
- dd_emit(ctx, 1, 0); /* 00000001 UNK135C */
-
- /* SRC_TIC state */
- dd_emit(ctx, 1, 0); /* 00000007 SRC_TILE_MODE_Z */
- dd_emit(ctx, 1, 2); /* 00000007 SRC_TILE_MODE_Y */
- dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */
- dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */
- dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */
- if (dev_priv->chipset >= 0x94)
- dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */
- dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */
- dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */
-
- /* turing state */
- dd_emit(ctx, 1, 0); /* 0000000f TEXTURES_LOG2 */
- dd_emit(ctx, 1, 0); /* 0000000f SAMPLERS_LOG2 */
- dd_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
- dd_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
- dd_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
- dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
- dd_emit(ctx, 1, 1); /* 0000ffff BLOCK_ALLOC_THREADS */
- dd_emit(ctx, 1, 1); /* 00000001 LANES32 */
- dd_emit(ctx, 1, 0); /* 000000ff UNK370 */
- dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_UNK */
- dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_COUNT */
- dd_emit(ctx, 1, 1); /* 000000ff UNK384 bits 8-15 */
- dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
- dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
- dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
- dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_X */
- dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_XMY */
- dd_emit(ctx, 1, 0); /* 00000001 BLOCKDIM_XMY_OVERFLOW */
- dd_emit(ctx, 1, 1); /* 0003ffff BLOCKDIM_XMYMZ */
- dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_Y */
- dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */
- dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */
- dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */
- if (IS_NVA3F(dev_priv->chipset))
- dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */
- dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */
- dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */
- dd_emit(ctx, 1, 7); /* 00000007 LOCAL_WARPS_LOG_ALLOC */
- dd_emit(ctx, 1, 1); /* 00000007 STACK_WARPS_NO_CLAMP */
- dd_emit(ctx, 1, 7); /* 00000007 STACK_WARPS_LOG_ALLOC */
- dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */
- dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */
- dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */
-
- /* compat 2d state */
- if (dev_priv->chipset == 0x50) {
- dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */
-
- dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */
-
- dd_emit(ctx, 1, 1); /* ffffffff pattern COLOR_FORMAT */
- dd_emit(ctx, 1, 0); /* ffffffff pattern SHAPE */
- dd_emit(ctx, 1, 1); /* ffffffff pattern PATTERN_SELECT */
-
- dd_emit(ctx, 1, 0xa); /* ffffffff surf2d SRC_FORMAT */
- dd_emit(ctx, 1, 0); /* ffffffff surf2d DMA_SRC */
- dd_emit(ctx, 1, 0); /* 000000ff surf2d SRC_ADDRESS_HIGH */
- dd_emit(ctx, 1, 0); /* ffffffff surf2d SRC_ADDRESS_LOW */
- dd_emit(ctx, 1, 0x40); /* 0000ffff surf2d SRC_PITCH */
- dd_emit(ctx, 1, 0); /* 0000000f surf2d SRC_TILE_MODE_Z */
- dd_emit(ctx, 1, 2); /* 0000000f surf2d SRC_TILE_MODE_Y */
- dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_HEIGHT */
- dd_emit(ctx, 1, 1); /* 00000001 surf2d SRC_LINEAR */
- dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_WIDTH */
-
- dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_X */
- dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_Y */
- dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_X */
- dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_Y */
- dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_X */
- dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_Y */
- dd_emit(ctx, 1, 1); /* ffffffff gdirect COLOR_FORMAT */
- dd_emit(ctx, 1, 0); /* ffffffff gdirect OPERATION */
- dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_X */
- dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_Y */
-
- dd_emit(ctx, 1, 0); /* 0000ffff blit SRC_Y */
- dd_emit(ctx, 1, 0); /* ffffffff blit OPERATION */
-
- dd_emit(ctx, 1, 0); /* ffffffff ifc OPERATION */
-
- dd_emit(ctx, 1, 0); /* ffffffff iifc INDEX_FORMAT */
- dd_emit(ctx, 1, 0); /* ffffffff iifc LUT_OFFSET */
- dd_emit(ctx, 1, 4); /* ffffffff iifc COLOR_FORMAT */
- dd_emit(ctx, 1, 0); /* ffffffff iifc OPERATION */
- }
-
- /* m2mf state */
- dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_COUNT */
- dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_LENGTH_IN */
- dd_emit(ctx, 2, 0); /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */
- dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_OUT */
- dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_OUT */
- dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_OUT_Z */
- dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_OUT */
- dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */
- dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_OUT */
- dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_IN */
- dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_IN */
- dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_IN_Z */
- dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_IN */
- dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_IN_X, Y */
- dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */
-
- /* more compat 2d state */
- if (dev_priv->chipset == 0x50) {
- dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */
- dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */
-
- dd_emit(ctx, 1, 1); /* ffffffff triangle COLOR_FORMAT */
- dd_emit(ctx, 1, 0); /* ffffffff triangle OPERATION */
-
- dd_emit(ctx, 1, 0); /* 0000000f sifm TILE_MODE_Z */
- dd_emit(ctx, 1, 2); /* 0000000f sifm TILE_MODE_Y */
- dd_emit(ctx, 1, 0); /* 000000ff sifm FORMAT_FILTER */
- dd_emit(ctx, 1, 1); /* 000000ff sifm FORMAT_ORIGIN */
- dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_PITCH */
- dd_emit(ctx, 1, 1); /* 00000001 sifm SRC_LINEAR */
- dd_emit(ctx, 1, 0); /* 000000ff sifm SRC_OFFSET_HIGH */
- dd_emit(ctx, 1, 0); /* ffffffff sifm SRC_OFFSET */
- dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_HEIGHT */
- dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_WIDTH */
- dd_emit(ctx, 1, 3); /* ffffffff sifm COLOR_FORMAT */
- dd_emit(ctx, 1, 0); /* ffffffff sifm OPERATION */
-
- dd_emit(ctx, 1, 0); /* ffffffff sifc OPERATION */
- }
-
- /* tesla state */
- dd_emit(ctx, 1, 0); /* 0000000f GP_TEXTURES_LOG2 */
- dd_emit(ctx, 1, 0); /* 0000000f GP_SAMPLERS_LOG2 */
- dd_emit(ctx, 1, 0); /* 000000ff */
- dd_emit(ctx, 1, 0); /* ffffffff */
- dd_emit(ctx, 1, 4); /* 000000ff UNK12B0_0 */
- dd_emit(ctx, 1, 0x70); /* 000000ff UNK12B0_1 */
- dd_emit(ctx, 1, 0x80); /* 000000ff UNK12B0_3 */
- dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */
- dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */
- dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */
- if (IS_NVA3F(dev_priv->chipset)) {
- dd_emit(ctx, 1, 0); /* ffffffff */
- dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
- } else {
- dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
- }
- dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */
- if (dev_priv->chipset != 0x50)
- dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */
- dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */
- dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */
- if (dev_priv->chipset == 0x50) {
- dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */
- dd_emit(ctx, 1, 0); /* 00000001 */
- } else {
- dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_PTSZ.ENABLE */
- dd_emit(ctx, 1, 0x29); /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */
- dd_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM */
- dd_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
- dd_emit(ctx, 1, 8); /* 0000000f SEMANTIC_CLIP.CLIP_HIGH */
- dd_emit(ctx, 1, 4); /* 000000ff SEMANTIC_CLIP.CLIP_LO */
- dd_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
- dd_emit(ctx, 1, 0); /* 00000001 UNK1900 */
- }
- dd_emit(ctx, 1, 0); /* 00000007 RT_CONTROL_MAP0 */
- dd_emit(ctx, 1, 1); /* 00000007 RT_CONTROL_MAP1 */
- dd_emit(ctx, 1, 2); /* 00000007 RT_CONTROL_MAP2 */
- dd_emit(ctx, 1, 3); /* 00000007 RT_CONTROL_MAP3 */
- dd_emit(ctx, 1, 4); /* 00000007 RT_CONTROL_MAP4 */
- dd_emit(ctx, 1, 5); /* 00000007 RT_CONTROL_MAP5 */
- dd_emit(ctx, 1, 6); /* 00000007 RT_CONTROL_MAP6 */
- dd_emit(ctx, 1, 7); /* 00000007 RT_CONTROL_MAP7 */
- dd_emit(ctx, 1, 1); /* 0000000f RT_CONTROL_COUNT */
- dd_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_UNK */
- dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
- dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */
- dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */
- if (dev_priv->chipset != 0x50)
- dd_emit(ctx, 3, 0); /* 1, 1, 1 */
- else
- dd_emit(ctx, 2, 0); /* 1, 1 */
- dd_emit(ctx, 1, 0); /* ffffffff GP_ENABLE */
- dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
- dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
- dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- if (IS_NVA3F(dev_priv->chipset)) {
- dd_emit(ctx, 1, 3); /* 00000003 */
- dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */
- }
- if (dev_priv->chipset != 0x50)
- dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */
- dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */
- dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */
- if (dev_priv->chipset != 0x50)
- dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
- dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
- dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
- dd_emit(ctx, 1, 0xc); /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */
- dd_emit(ctx, 1, 1); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */
- dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */
- dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */
- dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */
- dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
- dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */
- dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
- if (dev_priv->chipset >= 0xa0)
- dd_emit(ctx, 1, 0); /* ffffffff */
- dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
- dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */
- dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
- dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
- dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/
- if (dev_priv->chipset != 0x50)
- dd_emit(ctx, 8, 0); /* 00000001 */
- if (dev_priv->chipset >= 0xa0) {
- dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */
- dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */
- dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */
- dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */
- }
- dd_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
- dd_emit(ctx, 1, 0x14); /* 0000001f ZETA_FORMAT */
- dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */
- dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */
- if (IS_NVA3F(dev_priv->chipset))
- dd_emit(ctx, 1, 0); /* 00000001 */
- dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */
- if (dev_priv->chipset >= 0xa0)
- dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
- dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */
- if (dev_priv->chipset >= 0xa0)
- dd_emit(ctx, 1, 0); /* 00000003 */
- dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */
- dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */
- dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */
- dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */
- dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */
- if (dev_priv->chipset != 0x50) {
- dd_emit(ctx, 1, 0xe00); /* 7fff */
- dd_emit(ctx, 1, 0x1000); /* 7fff */
- dd_emit(ctx, 1, 0x1e00); /* 7fff */
- }
- dd_emit(ctx, 1, 0); /* 00000001 BEGIN_END_ACTIVE */
- dd_emit(ctx, 1, 1); /* 00000001 POLYGON_MODE_??? */
- dd_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */
- dd_emit(ctx, 1, 1); /* 000000ff FP_REG_ALLOC_TEMP... without /4? */
- dd_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */
- dd_emit(ctx, 1, 1); /* 00000001 */
- dd_emit(ctx, 1, 0); /* 00000001 */
- dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
- dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
- dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
- if (IS_NVA3F(dev_priv->chipset))
- dd_emit(ctx, 1, 0x200);
- dd_emit(ctx, 1, 0); /* 00000001 */
- if (dev_priv->chipset < 0xa0) {
- dd_emit(ctx, 1, 1); /* 00000001 */
- dd_emit(ctx, 1, 0x70); /* 000000ff */
- dd_emit(ctx, 1, 0x80); /* 000000ff */
- dd_emit(ctx, 1, 0); /* 000000ff */
- dd_emit(ctx, 1, 0); /* 00000001 */
- dd_emit(ctx, 1, 1); /* 00000001 */
- dd_emit(ctx, 1, 0x70); /* 000000ff */
- dd_emit(ctx, 1, 0x80); /* 000000ff */
- dd_emit(ctx, 1, 0); /* 000000ff */
- } else {
- dd_emit(ctx, 1, 1); /* 00000001 */
- dd_emit(ctx, 1, 0xf0); /* 000000ff */
- dd_emit(ctx, 1, 0xff); /* 000000ff */
- dd_emit(ctx, 1, 0); /* 000000ff */
- dd_emit(ctx, 1, 0); /* 00000001 */
- dd_emit(ctx, 1, 1); /* 00000001 */
- dd_emit(ctx, 1, 0xf0); /* 000000ff */
- dd_emit(ctx, 1, 0xff); /* 000000ff */
- dd_emit(ctx, 1, 0); /* 000000ff */
- dd_emit(ctx, 1, 9); /* 0000003f UNK114C.COMP,SIZE */
- }
-
- /* eng2d state */
- dd_emit(ctx, 1, 0); /* 00000001 eng2d COLOR_KEY_ENABLE */
- dd_emit(ctx, 1, 0); /* 00000007 eng2d COLOR_KEY_FORMAT */
- dd_emit(ctx, 1, 1); /* ffffffff eng2d DST_DEPTH */
- dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DST_FORMAT */
- dd_emit(ctx, 1, 0); /* ffffffff eng2d DST_LAYER */
- dd_emit(ctx, 1, 1); /* 00000001 eng2d DST_LINEAR */
- dd_emit(ctx, 1, 0); /* 00000007 eng2d PATTERN_COLOR_FORMAT */
- dd_emit(ctx, 1, 0); /* 00000007 eng2d OPERATION */
- dd_emit(ctx, 1, 0); /* 00000003 eng2d PATTERN_SELECT */
- dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SIFC_FORMAT */
- dd_emit(ctx, 1, 0); /* 00000001 eng2d SIFC_BITMAP_ENABLE */
- dd_emit(ctx, 1, 2); /* 00000003 eng2d SIFC_BITMAP_UNK808 */
- dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DU_DX_FRACT */
- dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DU_DX_INT */
- dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DV_DY_FRACT */
- dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DV_DY_INT */
- dd_emit(ctx, 1, 0); /* 00000001 eng2d BLIT_CONTROL_FILTER */
- dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DRAW_COLOR_FORMAT */
- dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SRC_FORMAT */
- dd_emit(ctx, 1, 1); /* 00000001 eng2d SRC_LINEAR #2 */
-
- num = ctx->ctxvals_pos - base;
- ctx->ctxvals_pos = base;
- if (IS_NVA3F(dev_priv->chipset))
- cp_ctx(ctx, 0x404800, num);
- else
- cp_ctx(ctx, 0x405400, num);
-}
-
-/*
- * xfer areas. These are a pain.
- *
- * There are 2 xfer areas: the first one is big and contains all sorts of
- * stuff, the second is small and contains some per-TP context.
- *
- * Each area is split into 8 "strands". The areas, when saved to grctx,
- * are made of 8-word blocks. Each block contains a single word from
- * each strand. The strands are independent of each other, their
- * addresses are unrelated to each other, and data in them is closely
- * packed together. The strand layout varies a bit between cards: here
- * and there, a single word is thrown out in the middle and the whole
- * strand is offset by a bit from the corresponding one on another chipset.
- * For this reason, addresses of stuff in strands are almost useless.
- * Knowing sequence of stuff and size of gaps between them is much more
- * useful, and that's how we build the strands in our generator.
- *
- * NVA0 takes this mess to a whole new level by cutting the old strands
- * into a few dozen pieces [known as genes], rearranging them randomly,
- * and putting them back together to make new strands. Hopefully these
- * genes correspond more or less directly to the same PGRAPH subunits
- * as in 400040 register.
- *
- * The most common value in the default context is 0, and when the genes
- * are separated by 0's, gene boundaries are quite speculative...
- * some of them can be clearly deduced, others can be guessed, and yet
- * others won't be resolved without figuring out the real meaning of
- * given ctxval. For the same reason, ending point of each strand
- * is unknown. Except for strand 0, which is the longest strand and
- * its end corresponds to end of the whole xfer.
- *
- * An unsolved mystery is the seek instruction: it takes an argument
- * in bits 8-18, and that argument is clearly the place in strands to
- * seek to... but the offsets don't seem to correspond to offsets as
- * seen in grctx. Perhaps there's another, real, not randomly-changing
- * addressing in strands, and the xfer insn just happens to skip over
- * the unused bits? NV10-NV30 PIPE comes to mind...
- *
- * As far as I know, there's no way to access the xfer areas directly
- * without the help of ctxprog.
- */
-
-static void
-xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val) {
- int i;
- if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
- for (i = 0; i < num; i++)
- nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
- ctx->ctxvals_pos += num << 3;
-}
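Putting the comment above together with xf_emit() and the per-strand starting positions set up in nv50_graph_construct_xfer1() below: strand s begins at word ctxvals_base + s and advances in steps of 8, so the n-th word of a strand lands at the offset computed in this sketch (hypothetical helper, not driver code):

/* Rough sketch: byte offset within the grctx image of the n-th 32-bit word
 * of strand s, given the 8-way interleave used by xf_emit() above. */
static uint32_t
example_strand_word_offset(uint32_t ctxvals_base, int s, int n)
{
	return (ctxvals_base + s + 8 * n) * 4;	/* strands 0..7 interleave word by word */
}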
-
-/* Gene declarations... */
-
-static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
-static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
-
-static void
-nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i;
- int offset;
- int size = 0;
- uint32_t units = nv_rd32 (ctx->dev, 0x1540);
-
- offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
- ctx->ctxvals_base = offset;
-
- if (dev_priv->chipset < 0xa0) {
- /* Strand 0 */
- ctx->ctxvals_pos = offset;
- nv50_graph_construct_gene_dispatch(ctx);
- nv50_graph_construct_gene_m2mf(ctx);
- nv50_graph_construct_gene_unk24xx(ctx);
- nv50_graph_construct_gene_clipid(ctx);
- nv50_graph_construct_gene_zcull(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 1 */
- ctx->ctxvals_pos = offset + 0x1;
- nv50_graph_construct_gene_vfetch(ctx);
- nv50_graph_construct_gene_eng2d(ctx);
- nv50_graph_construct_gene_csched(ctx);
- nv50_graph_construct_gene_ropm1(ctx);
- nv50_graph_construct_gene_ropm2(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 2 */
- ctx->ctxvals_pos = offset + 0x2;
- nv50_graph_construct_gene_ccache(ctx);
- nv50_graph_construct_gene_unk1cxx(ctx);
- nv50_graph_construct_gene_strmout(ctx);
- nv50_graph_construct_gene_unk14xx(ctx);
- nv50_graph_construct_gene_unk10xx(ctx);
- nv50_graph_construct_gene_unk34xx(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 3: per-ROP group state */
- ctx->ctxvals_pos = offset + 3;
- for (i = 0; i < 6; i++)
- if (units & (1 << (i + 16)))
- nv50_graph_construct_gene_ropc(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strands 4-7: per-TP state */
- for (i = 0; i < 4; i++) {
- ctx->ctxvals_pos = offset + 4 + i;
- if (units & (1 << (2 * i)))
- nv50_graph_construct_xfer_tp(ctx);
- if (units & (1 << (2 * i + 1)))
- nv50_graph_construct_xfer_tp(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
- }
- } else {
- /* Strand 0 */
- ctx->ctxvals_pos = offset;
- nv50_graph_construct_gene_dispatch(ctx);
- nv50_graph_construct_gene_m2mf(ctx);
- nv50_graph_construct_gene_unk34xx(ctx);
- nv50_graph_construct_gene_csched(ctx);
- nv50_graph_construct_gene_unk1cxx(ctx);
- nv50_graph_construct_gene_strmout(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 1 */
- ctx->ctxvals_pos = offset + 1;
- nv50_graph_construct_gene_unk10xx(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 2 */
- ctx->ctxvals_pos = offset + 2;
- if (dev_priv->chipset == 0xa0)
- nv50_graph_construct_gene_unk14xx(ctx);
- nv50_graph_construct_gene_unk24xx(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 3 */
- ctx->ctxvals_pos = offset + 3;
- nv50_graph_construct_gene_vfetch(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 4 */
- ctx->ctxvals_pos = offset + 4;
- nv50_graph_construct_gene_ccache(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 5 */
- ctx->ctxvals_pos = offset + 5;
- nv50_graph_construct_gene_ropm2(ctx);
- nv50_graph_construct_gene_ropm1(ctx);
- /* per-ROP context */
- for (i = 0; i < 8; i++)
- if (units & (1<<(i+16)))
- nv50_graph_construct_gene_ropc(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 6 */
- ctx->ctxvals_pos = offset + 6;
- nv50_graph_construct_gene_zcull(ctx);
- nv50_graph_construct_gene_clipid(ctx);
- nv50_graph_construct_gene_eng2d(ctx);
- if (units & (1 << 0))
- nv50_graph_construct_xfer_tp(ctx);
- if (units & (1 << 1))
- nv50_graph_construct_xfer_tp(ctx);
- if (units & (1 << 2))
- nv50_graph_construct_xfer_tp(ctx);
- if (units & (1 << 3))
- nv50_graph_construct_xfer_tp(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 7 */
- ctx->ctxvals_pos = offset + 7;
- if (dev_priv->chipset == 0xa0) {
- if (units & (1 << 4))
- nv50_graph_construct_xfer_tp(ctx);
- if (units & (1 << 5))
- nv50_graph_construct_xfer_tp(ctx);
- if (units & (1 << 6))
- nv50_graph_construct_xfer_tp(ctx);
- if (units & (1 << 7))
- nv50_graph_construct_xfer_tp(ctx);
- if (units & (1 << 8))
- nv50_graph_construct_xfer_tp(ctx);
- if (units & (1 << 9))
- nv50_graph_construct_xfer_tp(ctx);
- } else {
- nv50_graph_construct_gene_unk14xx(ctx);
- }
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
- }
-
- ctx->ctxvals_pos = offset + size * 8;
- ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
- cp_lsr (ctx, offset);
- cp_out (ctx, CP_SET_XFER_POINTER);
- cp_lsr (ctx, size);
- cp_out (ctx, CP_SEEK_1);
- cp_out (ctx, CP_XFER_1);
- cp_wait(ctx, XFER, BUSY);
-}
-
-/*
- * non-trivial demagiced parts of ctx init go here
- */
-
-static void
-nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
-{
- /* start of strand 0 */
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* SEEK */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 5, 0);
- else if (!IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 6, 0);
- else
- xf_emit(ctx, 4, 0);
- /* SEEK */
- /* the PGRAPH's internal FIFO */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 8*3, 0);
- else
- xf_emit(ctx, 0x100*3, 0);
- /* and another bonus slot?!? */
- xf_emit(ctx, 3, 0);
- /* and YET ANOTHER bonus slot? */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 3, 0);
- /* SEEK */
- /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */
- xf_emit(ctx, 9, 0);
- /* SEEK */
- xf_emit(ctx, 9, 0);
- /* SEEK */
- xf_emit(ctx, 9, 0);
- /* SEEK */
- xf_emit(ctx, 9, 0);
- /* SEEK */
- if (dev_priv->chipset < 0x90)
- xf_emit(ctx, 4, 0);
- /* SEEK */
- xf_emit(ctx, 2, 0);
- /* SEEK */
- xf_emit(ctx, 6*2, 0);
- xf_emit(ctx, 2, 0);
- /* SEEK */
- xf_emit(ctx, 2, 0);
- /* SEEK */
- xf_emit(ctx, 6*2, 0);
- xf_emit(ctx, 2, 0);
- /* SEEK */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0x1c, 0);
- else if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x1e, 0);
- else
- xf_emit(ctx, 0x22, 0);
- /* SEEK */
- xf_emit(ctx, 0x15, 0);
-}
-
-static void
-nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
-{
- /* Strand 0, right after dispatch */
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int smallm2mf = 0;
- if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98)
- smallm2mf = 1;
- /* SEEK */
- xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
- xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
- xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
- xf_emit (ctx, 1, 0); /* OFFSET_IN */
- xf_emit (ctx, 1, 0); /* OFFSET_OUT */
- xf_emit (ctx, 1, 0); /* PITCH_IN */
- xf_emit (ctx, 1, 0); /* PITCH_OUT */
- xf_emit (ctx, 1, 0); /* LINE_LENGTH */
- xf_emit (ctx, 1, 0); /* LINE_COUNT */
- xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
- xf_emit (ctx, 1, 1); /* LINEAR_IN */
- xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
- xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */
- xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */
- xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */
- xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */
- xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */
- xf_emit (ctx, 1, 1); /* LINEAR_OUT */
- xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
- xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */
- xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */
- xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */
- xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */
- xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
- xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
- xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
- /* SEEK */
- if (smallm2mf)
- xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */
- else
- xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */
- xf_emit(ctx, 4, 0); /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */
- /* SEEK */
- if (smallm2mf)
- xf_emit(ctx, 0x400, 0); /* ffffffff */
- else
- xf_emit(ctx, 0x800, 0); /* ffffffff */
- xf_emit(ctx, 4, 0); /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */
- /* SEEK */
- xf_emit(ctx, 0x40, 0); /* 20 * bits ffffffff, 3ffff */
- xf_emit(ctx, 0x6, 0); /* 1f, 0, 1f, 0, 1f, 0 */
-}
-
-static void
-nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 2, 0); /* RO */
- xf_emit(ctx, 0x800, 0); /* ffffffff */
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x92:
- case 0xa0:
- xf_emit(ctx, 0x2b, 0);
- break;
- case 0x84:
- xf_emit(ctx, 0x29, 0);
- break;
- case 0x94:
- case 0x96:
- case 0xa3:
- xf_emit(ctx, 0x27, 0);
- break;
- case 0x86:
- case 0x98:
- case 0xa5:
- case 0xa8:
- case 0xaa:
- case 0xac:
- case 0xaf:
- xf_emit(ctx, 0x25, 0);
- break;
- }
- /* CB bindings, 0x80 of them. first word is address >> 8, second is
- * size >> 4 | valid << 24 */
- xf_emit(ctx, 0x100, 0); /* ffffffff CB_DEF */
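- /* Editorial note (names are hypothetical, illustration only): each of
-  * the 0x80 bindings occupies two words, hence the 0x100 words emitted
-  * above; one binding packed as described would be roughly
-  *     word0 = cb_address >> 8;
-  *     word1 = (cb_size >> 4) | (valid << 24);
-  */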
- xf_emit(ctx, 1, 0); /* 0000007f CB_ADDR_BUFFER */
- xf_emit(ctx, 1, 0); /* 0 */
- xf_emit(ctx, 0x30, 0); /* ff SET_PROGRAM_CB */
- xf_emit(ctx, 1, 0); /* 3f last SET_PROGRAM_CB */
- xf_emit(ctx, 4, 0); /* RO */
- xf_emit(ctx, 0x100, 0); /* ffffffff */
- xf_emit(ctx, 8, 0); /* 1f, 0, 0, ... */
- xf_emit(ctx, 8, 0); /* ffffffff */
- xf_emit(ctx, 4, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 3 */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_CODE_CB */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_TIC */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_TSC */
- xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
- xf_emit(ctx, 1, 0); /* 000000ff TIC_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff TIC_ADDRESS_LOW */
- xf_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
- xf_emit(ctx, 1, 0); /* 000000ff TSC_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff TSC_ADDRESS_LOW */
- xf_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
- xf_emit(ctx, 1, 0); /* 000000ff VP_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff VP_ADDRESS_LOW */
- xf_emit(ctx, 1, 0); /* 00ffffff VP_START_ID */
- xf_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0); /* 000000ff GP_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff GP_ADDRESS_LOW */
- xf_emit(ctx, 1, 0); /* 00ffffff GP_START_ID */
- xf_emit(ctx, 1, 0); /* 000000ff FP_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
- xf_emit(ctx, 1, 0); /* 00ffffff FP_START_ID */
-}
-
-static void
-nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i;
- /* end of area 2 on pre-NVA0, area 1 on NVAx */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
- xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
- xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
- xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff);
- else
- xf_emit(ctx, 1, 0x7ff); /* 000007ff */
- xf_emit(ctx, 1, 0); /* 111/113 */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- for (i = 0; i < 8; i++) {
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x86:
- case 0x98:
- case 0xaa:
- case 0xac:
- xf_emit(ctx, 0xa0, 0); /* ffffffff */
- break;
- case 0x84:
- case 0x92:
- case 0x94:
- case 0x96:
- xf_emit(ctx, 0x120, 0);
- break;
- case 0xa5:
- case 0xa8:
- xf_emit(ctx, 0x100, 0); /* ffffffff */
- break;
- case 0xa0:
- case 0xa3:
- case 0xaf:
- xf_emit(ctx, 0x400, 0); /* ffffffff */
- break;
- }
- xf_emit(ctx, 4, 0); /* 3f, 0, 0, 0 */
- xf_emit(ctx, 4, 0); /* ffffffff */
- }
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
- xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_TEMP */
- xf_emit(ctx, 1, 1); /* 00000001 RASTERIZE_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
- xf_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */
- xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
- xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
-}
-
-static void
-nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* end of area 2 on pre-NVA0, area 1 on NVAx */
- xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
- xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */
- xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */
- xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
- xf_emit(ctx, 0x20, 0); /* ffffffff POLYGON_STIPPLE */
- xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
- xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
- xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
- xf_emit(ctx, 1, 0); /* 00000007 */
- xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0x0fac6881);
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 1, 1);
- xf_emit(ctx, 3, 0);
- }
-}
-
-static void
-nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
- if (dev_priv->chipset != 0x50) {
- xf_emit(ctx, 5, 0); /* ffffffff */
- xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 1, 0); /* 000003ff */
- xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 2, 4); /* 7f, ff */
- xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- }
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
- xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */
- if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0); /* 3ff */
- xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
- xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */
- xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
- xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
- if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */
- xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
- xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0F8C */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
- xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
- xf_emit(ctx, 1, 0); /* 0000000f */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
- else
- xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
- xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
- xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
- xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */
- xf_emit(ctx, 3, 0); /* f, 0, 0 */
- xf_emit(ctx, 3, 0); /* ffffffff last VIEWPORT_SCALE? */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
- xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_TRANSLATE */
- xf_emit(ctx, 3, 0); /* f, 0, 0 */
- xf_emit(ctx, 3, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 2, 0x88); /* 000001ff tesla UNK19D8 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
- xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
- xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
- xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
- xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
- xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
- xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
- xf_emit(ctx, 1, 0); /* 0000000f */
- xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
- xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
- if (dev_priv->chipset != 0x50) {
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 1, 0); /* 000003ff */
- }
- xf_emit(ctx, 0x20, 0); /* 10xbits ffffffff, 3fffff. SCISSOR_* */
- xf_emit(ctx, 1, 0); /* f */
- xf_emit(ctx, 1, 0); /* 0? */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 003fffff */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
- xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
- xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
- xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
- xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
- xf_emit(ctx, 1, 0); /* 0000000f */
-}
-
-static void
-nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
- /* SEEK */
- xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
- xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
- xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
- xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
- xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
- xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
- xf_emit(ctx, 1, 0); /* 0000ffff */
- xf_emit(ctx, 1, 0); /* 00000001 UNK0FB0 */
- xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */
- xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
- xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
- xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
- xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
- xf_emit(ctx, 1, 0); /* 00000007 */
- if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */
- xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
- /* SEEK */
- xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
- xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
- xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
- xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
- xf_emit(ctx, 1, 0x10); /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */
- xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */
- xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */
- if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */
-}
-
-static void
-nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
-{
- /* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 00000007 UNK0FB4 */
- /* SEEK */
- xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_HORIZ */
- xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_VERT */
- xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
- xf_emit(ctx, 2, 0x04000000); /* 07ffffff UNK1508 */
- xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */
- xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_WIDTH */
- xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ID */
- xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff CLIPID_ADDRESS_LOW */
- xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_HEIGHT */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_CLIPID */
-}
-
-static void
-nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i;
- /* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
- /* SEEK */
- xf_emit(ctx, 0x33, 0);
- /* SEEK */
- xf_emit(ctx, 2, 0);
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- /* SEEK */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 4, 0); /* RO */
- xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
- xf_emit(ctx, 1, 0); /* 1ff */
- xf_emit(ctx, 8, 0); /* 0? */
- xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
-
- xf_emit(ctx, 4, 0); /* RO */
- xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
- xf_emit(ctx, 1, 0); /* 1ff */
- xf_emit(ctx, 8, 0); /* 0? */
- xf_emit(ctx, 9, 0); /* ffffffff, 7ff */
- } else {
- xf_emit(ctx, 0xc, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
- xf_emit(ctx, 1, 0); /* 1ff */
- xf_emit(ctx, 8, 0); /* 0? */
-
- /* SEEK */
- xf_emit(ctx, 0xc, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */
- xf_emit(ctx, 1, 0); /* 1ff */
- xf_emit(ctx, 8, 0); /* 0? */
- }
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
- xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
- xf_emit(ctx, 1, 1); /* 00000001 */
- /* SEEK */
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 2, 4); /* 000000ff */
- xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */
- xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
- xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- xf_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM_ID */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0); /* 0000000f */
- xf_emit(ctx, 1, 1); /* 00000001 */
- for (i = 0; i < 10; i++) {
- /* SEEK */
- xf_emit(ctx, 0x40, 0); /* ffffffff */
- xf_emit(ctx, 0x10, 0); /* 3, 0, 0.... */
- xf_emit(ctx, 0x10, 0); /* ffffffff */
- }
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_CTRL */
- xf_emit(ctx, 1, 1); /* 00000001 */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */
- xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */
- xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
- xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0); /* 000003ff */
-}
-
-static void
-nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int acnt = 0x10, rep, i;
- /* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
- if (IS_NVA3F(dev_priv->chipset))
- acnt = 0x20;
- /* SEEK */
- if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */
- xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */
- }
- xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */
- xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */
- xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */
- xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
- xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
- xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
- xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
- xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */
- xf_emit(ctx, 1, 0); /* 000000ff turing UNK370??? */
- xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- /* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 0xb, 0); /* RO */
- else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0x9, 0); /* RO */
- else
- xf_emit(ctx, 0x8, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */
- xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
- /* SEEK */
- xf_emit(ctx, 0xc, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 7f/ff */
- xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
- xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
- xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */
- xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */
- else
- xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */
- if (dev_priv->chipset == 0xa8)
- xf_emit(ctx, 1, 0x1e00); /* 7fff */
- /* SEEK */
- xf_emit(ctx, 0xc, 0); /* RO or close */
- /* SEEK */
- xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
- xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
- xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
- if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
- xf_emit(ctx, 2, 0); /* ffffffff */
- else
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */
- /* SEEK */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 0x10, 0); /* 0? */
- xf_emit(ctx, 2, 0); /* weird... */
- xf_emit(ctx, 2, 0); /* RO */
- } else {
- xf_emit(ctx, 8, 0); /* 0? */
- xf_emit(ctx, 1, 0); /* weird... */
- xf_emit(ctx, 2, 0); /* RO */
- }
- /* SEEK */
- xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */
- xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */
- xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */
- /* SEEK */
- xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
- xf_emit(ctx, 1, 0); /* f/1f */
- /* SEEK */
- xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */
- xf_emit(ctx, 1, 0); /* f/1f */
- /* SEEK */
- xf_emit(ctx, acnt, 0); /* RO */
- xf_emit(ctx, 2, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? */
- xf_emit(ctx, 1, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */
- xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */
- /* SEEK */
- xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */
- xf_emit(ctx, 3, 0); /* f/1f */
- /* SEEK */
- xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */
- xf_emit(ctx, 3, 0); /* f/1f */
- /* SEEK */
- xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */
- xf_emit(ctx, 3, 0); /* f/1f */
- /* SEEK */
- xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */
- xf_emit(ctx, 3, 0); /* f/1f */
- /* SEEK */
- xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */
- xf_emit(ctx, 3, 0); /* f/1f */
- /* SEEK */
- xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */
- xf_emit(ctx, 3, 0); /* f/1f */
- /* SEEK */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, acnt, 0); /* f */
- xf_emit(ctx, 3, 0); /* f/1f */
- }
- /* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 2, 0); /* RO */
- else
- xf_emit(ctx, 5, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */
- /* SEEK */
- if (dev_priv->chipset < 0xa0) {
- xf_emit(ctx, 0x41, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 0x11, 0); /* RO */
- } else if (!IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 0x50, 0); /* RO */
- else
- xf_emit(ctx, 0x58, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
- xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
- xf_emit(ctx, 1, 1); /* 1 UNK0DEC */
- /* SEEK */
- xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */
- xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */
- /* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 0x1d, 0); /* RO */
- else
- xf_emit(ctx, 0x16, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
- xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
- /* SEEK */
- if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 8, 0); /* RO */
- else if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 0xc, 0); /* RO */
- else
- xf_emit(ctx, 7, 0); /* RO */
- /* SEEK */
- xf_emit(ctx, 0xa, 0); /* RO */
- if (dev_priv->chipset == 0xa0)
- rep = 0xc;
- else
- rep = 4;
- for (i = 0; i < rep; i++) {
- /* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 0x20, 0); /* ffffffff */
- xf_emit(ctx, 0x200, 0); /* ffffffff */
- xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */
- xf_emit(ctx, 4, 0); /* ffffffff */
- }
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 113/111 */
- xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */
- xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */
- xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */
- xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- /* SEEK */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 7, 0); /* weird... */
- else
- xf_emit(ctx, 5, 0); /* weird... */
-}
-
-static void
-nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
- /* SEEK */
- xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */
- xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */
- xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */
- if (dev_priv->chipset < 0xa0) {
- /* this is useless on everything but the original NV50,
- * guess they forgot to nuke it. Or just didn't bother. */
- xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */
- xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */
- xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */
- }
- xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
- xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
- xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
- xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */
- xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */
- xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */
- xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */
- xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */
- xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */
- xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */
- xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */
- xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */
- xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */
- xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */
- xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */
- xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */
- xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
- xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */
- xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */
- xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */
- xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */
- xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */
- xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */
- xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
- xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
- xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
- xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
- xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */
- xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */
- xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */
- xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */
- xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */
- xf_emit(ctx, 1, 0); /* 00000001 UNK888 */
- xf_emit(ctx, 1, 4); /* 0000003f UNK884 */
- xf_emit(ctx, 1, 0); /* 00000007 UNK880 */
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */
- xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */
- xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */
- xf_emit(ctx, 1, 0); /* 00000001 UNK260 */
- xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */
- /* SEEK */
- xf_emit(ctx, 0x10, 0);
- /* SEEK */
- xf_emit(ctx, 0x27, 0);
-}
-
-static void
-nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
- /* SEEK */
- xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */
- xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
- xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- xf_emit(ctx, 1, 0); /* 000003ff */
- /* SEEK */
- xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */
- xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */
- xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */
- xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */
- xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */
- xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */
- xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
- xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
- xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
- xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
- xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
- xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
- /* SEEK */
- xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */
- switch (dev_priv->chipset) {
- case 0x50:
- case 0x92:
- xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
- xf_emit(ctx, 0x80, 0); /* fff */
- xf_emit(ctx, 2, 0); /* ff, fff */
- xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */
- break;
- case 0x84:
- xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
- xf_emit(ctx, 0x60, 0); /* fff */
- xf_emit(ctx, 2, 0); /* ff, fff */
- xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
- break;
- case 0x94:
- case 0x96:
- xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
- xf_emit(ctx, 0x40, 0); /* fff */
- xf_emit(ctx, 2, 0); /* ff, fff */
- xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */
- break;
- case 0x86:
- case 0x98:
- xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
- xf_emit(ctx, 0x10, 0); /* fff */
- xf_emit(ctx, 2, 0); /* ff, fff */
- xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
- break;
- case 0xa0:
- xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
- xf_emit(ctx, 0xf0, 0); /* fff */
- xf_emit(ctx, 2, 0); /* ff, fff */
- xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */
- break;
- case 0xa3:
- xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
- xf_emit(ctx, 0x60, 0); /* fff */
- xf_emit(ctx, 2, 0); /* ff, fff */
- xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */
- break;
- case 0xa5:
- case 0xaf:
- xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */
- xf_emit(ctx, 0x30, 0); /* fff */
- xf_emit(ctx, 2, 0); /* ff, fff */
- xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */
- break;
- case 0xaa:
- xf_emit(ctx, 0x12, 0);
- break;
- case 0xa8:
- case 0xac:
- xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */
- xf_emit(ctx, 0x10, 0); /* fff */
- xf_emit(ctx, 2, 0); /* ff, fff */
- xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */
- break;
- }
- xf_emit(ctx, 1, 0); /* 0000000f */
- xf_emit(ctx, 1, 0); /* 00000000 */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 0000001f */
- xf_emit(ctx, 4, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 4, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 000000ff */
-}
-
-static void
-nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
- xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
- xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
- xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */
- xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */
- xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
- xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
- xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
- xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */
- xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
- xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
- xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
- xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
- xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */
- xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
- xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */
- else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */
- xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
- xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
- xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
- xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */
- xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */
- xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */
- if (dev_priv->chipset != 0x50) {
- xf_emit(ctx, 1, 0); /* 3ff */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */
- }
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
- xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
- xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
- xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */
- xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */
- xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
- xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */
- xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
- xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
- if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
- if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x1c, 0); /* RO */
- else if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 0x9, 0);
- xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
- xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
- xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
- xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */
- if (dev_priv->chipset != 0x50) {
- xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */
- xf_emit(ctx, 1, 0); /* 3ff */
- }
- /* XXX: the following block could belong either to unk1cxx, or
- * to STRMOUT. Rather hard to tell. */
- if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0x25, 0);
- else
- xf_emit(ctx, 0x3b, 0);
-}
-
-static void
-nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
- xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
- xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
- if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
- xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
- }
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */
- else
- xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- /* SEEK */
- xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */
- xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */
- xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */
- xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */
- xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */
- if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */
- xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */
- }
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
- xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
- xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */
- xf_emit(ctx, 2, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- /* SEEK */
- xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */
- xf_emit(ctx, 1, 0); /* 0000000f */
- xf_emit(ctx, 1, 0); /* 00000000? */
- xf_emit(ctx, 2, 0); /* ffffffff */
-}
-
-static void
-nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
- xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
- xf_emit(ctx, 1, 0); /* 00000007 */
- xf_emit(ctx, 1, 0); /* 000003ff */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
-}
-
-static void
-nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
- xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- xf_emit(ctx, 2, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
- xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
- xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
- xf_emit(ctx, 1, 0); /* 7 */
- /* SEEK */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */
- xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */
- xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */
- xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */
- xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */
- xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 1, 0); /* 00000007 */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
-}
-
-static void
-nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int magic2;
- if (dev_priv->chipset == 0x50) {
- magic2 = 0x00003e60;
- } else if (!IS_NVA3F(dev_priv->chipset)) {
- magic2 = 0x001ffe67;
- } else {
- magic2 = 0x00087e67;
- }
- xf_emit(ctx, 1, 0); /* f/7 MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
- xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
- xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
- xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
- xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
- xf_emit(ctx, 1, 0x15); /* 000000ff */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
- xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */
- xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */
- xf_emit(ctx, 1, 4); /* 7 */
- xf_emit(ctx, 1, 0x400); /* fffffff */
- xf_emit(ctx, 1, 0x300); /* ffff */
- xf_emit(ctx, 1, 0x1001); /* 1fff */
- if (dev_priv->chipset != 0xa0) {
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */
- else
- xf_emit(ctx, 1, 0x15); /* ff */
- }
- }
- xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
- xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
- xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
- xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
- xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- xf_emit(ctx, 1, 0); /* 0000000f */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
- xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
- xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */
- xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
- xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
- xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
- xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */
- xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
- xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
- xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */
- xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
- xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
- if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 2, 0);
- xf_emit(ctx, 1, 0x1001);
- xf_emit(ctx, 0xb, 0);
- } else {
- xf_emit(ctx, 1, 0); /* 00000007 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- }
- xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 1, 0x11); /* 3f/7f */
- xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- if (dev_priv->chipset != 0x50) {
- xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
- xf_emit(ctx, 1, 0); /* 000000ff */
- }
- xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
- xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */
- xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */
- xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
- xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
- xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
- xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
- xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
- xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
- xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
- xf_emit(ctx, 2, 0); /* 00000001 */
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- xf_emit(ctx, 1, 0); /* 0000000f */
- xf_emit(ctx, 1, 0); /* 00000003 */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 2, 0); /* 00000001 */
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 1, 0); /* 000003ff */
- } else if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 2, 0); /* 00000001 */
- xf_emit(ctx, 1, 0); /* 00000007 */
- xf_emit(ctx, 1, 0); /* 00000003 */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 2, 0); /* 00000001 */
- } else {
- xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- }
- xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */
- xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */
- xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 2, 0); /* 00000001 */
- xf_emit(ctx, 1, 0); /* 000003ff */
- xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
- xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
- xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
- xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
- xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
- xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
- xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
- xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
- xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
- xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
- xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
- xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */
- }
- xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
- xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */
- xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */
- xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */
- xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */
- xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */
- xf_emit(ctx, 1, 0); /* 000000ff ROP */
- xf_emit(ctx, 1, 0); /* ffffffff BETA1 */
- xf_emit(ctx, 1, 0); /* ffffffff BETA4 */
- xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
- xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */
-}
-
-static void
-nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int magic3;
- switch (dev_priv->chipset) {
- case 0x50:
- magic3 = 0x1000;
- break;
- case 0x86:
- case 0x98:
- case 0xa8:
- case 0xaa:
- case 0xac:
- case 0xaf:
- magic3 = 0x1e00;
- break;
- default:
- magic3 = 0;
- }
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 0x1f, 0); /* ffffffff */
- else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0x0f, 0); /* ffffffff */
- else
- xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */
- xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
- xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
- xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0x03020100); /* ffffffff */
- else
- xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */
- xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
- if (magic3)
- xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */
- xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 0); /* 111/113 */
- xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */
- xf_emit(ctx, 1, 0); /* 0000001f */
- xf_emit(ctx, 1, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */
- xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */
- xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
- if (magic3)
- xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */
- xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 0); /* 111/113 */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
- xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */
- xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- xf_emit(ctx, 1, 0); /* 111/113 */
- if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
- xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
- else if (dev_priv->chipset < 0xa0)
- xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
- else if (!IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 0x210, 0); /* ffffffff */
- else
- xf_emit(ctx, 0x410, 0); /* ffffffff */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */
- xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
- xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
-}
-
-static void
-nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int magic1, magic2;
- if (dev_priv->chipset == 0x50) {
- magic1 = 0x3ff;
- magic2 = 0x00003e60;
- } else if (!IS_NVA3F(dev_priv->chipset)) {
- magic1 = 0x7ff;
- magic2 = 0x001ffe67;
- } else {
- magic1 = 0x7ff;
- magic2 = 0x00087e67;
- }
- xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
- xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */
- xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */
- xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */
- xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
- xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */
- xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */
- xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */
- xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */
- xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
- xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
- xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */
- xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
- xf_emit(ctx, 1, 0); /* 7 */
- xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */
- xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */
- xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */
- xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
- xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
- xf_emit(ctx, 1, 0); /* 00000003 */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */
- } else if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */
- xf_emit(ctx, 1, 0); /* 00000003 */
- } else {
- xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
- }
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
- xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
- xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
- xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
- xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
- xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
- xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_RGB */
- xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_RGB */
- xf_emit(ctx, 8, 2); /* 0000001f IBLEND_SRC_ALPHA */
- xf_emit(ctx, 8, 1); /* 0000001f IBLEND_DST_ALPHA */
- xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
- }
- xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
- xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
- xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
- xf_emit(ctx, 1, 0); /* 7 */
- xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
- xf_emit(ctx, 1, 0); /* 00000007 OPERATION */
- xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */
- xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */
- xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
- xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
- xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
- xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */
- xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0); /* ff */
- else
- xf_emit(ctx, 3, 0); /* 1, 7, 3ff */
- xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
- xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000007 */
- xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
- xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */
- xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */
- xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */
- xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000007 */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */
- xf_emit(ctx, 1, 0); /* 7 */
- xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */
- xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */
- xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */
- xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */
- xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */
- xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */
- xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */
- xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */
- xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
- xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */
- xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */
- xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */
- xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */
- xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */
- xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */
- xf_emit(ctx, 1, 0); /* 0000ffff */
- xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
- xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
- xf_emit(ctx, 1, 0); /* 00000007 */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
- xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */
- xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- xf_emit(ctx, 2, 0); /* ffff, ff/3ff */
- xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
- xf_emit(ctx, 1, 0); /* 00000007 */
- xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */
- xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */
- xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */
- xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */
- xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 0); /* 00000001 */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */
- xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */
- xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */
- xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
- xf_emit(ctx, 1, 0); /* 7 */
- xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- }
- xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0x0fac6881); /* fffffff */
- xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */
- xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
- xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */
- xf_emit(ctx, 1, 0); /* 00000007 */
- xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */
- xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */
- xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */
- }
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */
- if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */
- xf_emit(ctx, 1, 0xfac6881); /* fffffff */
- xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */
- xf_emit(ctx, 1, 4); /* 7 */
- xf_emit(ctx, 1, 0); /* 1 */
- xf_emit(ctx, 2, 1); /* 1 */
- xf_emit(ctx, 2, 0); /* 7, f */
- xf_emit(ctx, 1, 1); /* 1 */
- xf_emit(ctx, 1, 0); /* 7/f */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 0x9, 0); /* 1 */
- else
- xf_emit(ctx, 0x8, 0); /* 1 */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 8, 1); /* 1 */
- xf_emit(ctx, 1, 0x11); /* 7f */
- xf_emit(ctx, 7, 0); /* 7f */
- xf_emit(ctx, 1, 0xfac6881); /* fffffff */
- xf_emit(ctx, 1, 0xf); /* f */
- xf_emit(ctx, 7, 0); /* f */
- xf_emit(ctx, 1, 0x11); /* 7f */
- xf_emit(ctx, 1, 1); /* 1 */
- xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- }
- }
-}
-
-static void
-nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */
- if (dev_priv->chipset != 0x50)
- xf_emit(ctx, 1, 0); /* 3 */
- xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */
- xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */
- xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */
- xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */
- else
- xf_emit(ctx, 2, 0); /* 3ff, 1 */
- xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */
- xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */
- xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */
- xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */
- xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */
- xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */
- xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */
- xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */
- if (dev_priv->chipset == 0x50) {
- xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
- xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */
- xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */
- xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */
- } else if (!IS_NVAAF(dev_priv->chipset)) {
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */
- xf_emit(ctx, 1, 0); /* 00000003 */
- xf_emit(ctx, 1, 0); /* 000003ff */
- xf_emit(ctx, 1, 0); /* 00000003 */
- xf_emit(ctx, 1, 0); /* 000003ff */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */
- xf_emit(ctx, 1, 0); /* 00000003 */
- xf_emit(ctx, 1, 0); /* 000003ff */
- } else {
- xf_emit(ctx, 0x6, 0);
- }
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */
- xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */
-}
-
-static void
-nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
- xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
- xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */
- xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */
- xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
- xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */
- xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */
- xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */
- xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */
- xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */
- xf_emit(ctx, 1, 0); /* ffff0ff3 */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */
- xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */
- xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */
- xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */
- xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */
- xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */
- xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */
- xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */
- xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */
- xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */
-}
-
-static void
-nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- if (dev_priv->chipset < 0xa0) {
- nv50_graph_construct_xfer_unk84xx(ctx);
- nv50_graph_construct_xfer_tprop(ctx);
- nv50_graph_construct_xfer_tex(ctx);
- nv50_graph_construct_xfer_unk8cxx(ctx);
- } else {
- nv50_graph_construct_xfer_tex(ctx);
- nv50_graph_construct_xfer_tprop(ctx);
- nv50_graph_construct_xfer_unk8cxx(ctx);
- nv50_graph_construct_xfer_unk84xx(ctx);
- }
-}
-
-static void
-nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i, mpcnt = 2;
- switch (dev_priv->chipset) {
- case 0x98:
- case 0xaa:
- mpcnt = 1;
- break;
- case 0x50:
- case 0x84:
- case 0x86:
- case 0x92:
- case 0x94:
- case 0x96:
- case 0xa8:
- case 0xac:
- mpcnt = 2;
- break;
- case 0xa0:
- case 0xa3:
- case 0xa5:
- case 0xaf:
- mpcnt = 3;
- break;
- }
- for (i = 0; i < mpcnt; i++) {
- xf_emit(ctx, 1, 0); /* ff */
- xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */
- xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */
- xf_emit(ctx, 1, 0x04000400); /* ffffffff */
- if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */
- xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */
- if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) {
- xf_emit(ctx, 1, 0xe00); /* 7fff */
- xf_emit(ctx, 1, 0x1e00); /* 7fff */
- }
- xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */
- xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */
- xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */
- xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */
- xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */
- xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
- if (IS_NVAAF(dev_priv->chipset))
- xf_emit(ctx, 0xb, 0); /* RO */
- else if (dev_priv->chipset >= 0xa0)
- xf_emit(ctx, 0xc, 0); /* RO */
- else
- xf_emit(ctx, 0xa, 0); /* RO */
- }
- xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- if (dev_priv->chipset >= 0xa0) {
- xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */
- }
- xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */
- xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */
- xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */
- xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */
- xf_emit(ctx, 1, 1); /* 00000001 LANES32 */
- xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */
- xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */
- xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */
- xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */
- xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNK0FAC */
- xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */
- xf_emit(ctx, 1, 0); /* ff/3ff */
- xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */
- xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */
- xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */
- xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */
- xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */
- xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */
- xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */
- xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */
- xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */
- xf_emit(ctx, 1, 0); /* 00000007 */
- xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */
- xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */
- if (IS_NVA3F(dev_priv->chipset))
- xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */
- xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */
- xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */
- xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */
- xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */
- xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */
- xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */
- xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */
- xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */
- xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */
- xf_emit(ctx, 1, 1); /* 00000001 UNK133C */
- if (IS_NVA3F(dev_priv->chipset)) {
- xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */
- xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */
- xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */
- xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */
- xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */
- xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */
- xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */
- xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */
- xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */
- }
- xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */
- xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */
- /* XXX: demagic this part some day */
- if (dev_priv->chipset == 0x50)
- xf_emit(ctx, 0x3a0, 0);
- else if (dev_priv->chipset < 0x94)
- xf_emit(ctx, 0x3a2, 0);
- else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
- xf_emit(ctx, 0x39f, 0);
- else
- xf_emit(ctx, 0x3a3, 0);
- xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */
- xf_emit(ctx, 1, 0); /* 7 OPERATION */
- xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */
- xf_emit(ctx, 0x2d, 0);
-}
-
-static void
-nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
-{
- struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
- int i;
- uint32_t offset;
- uint32_t units = nv_rd32 (ctx->dev, 0x1540);
- int size = 0;
-
- offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
-
- if (dev_priv->chipset < 0xa0) {
- for (i = 0; i < 8; i++) {
- ctx->ctxvals_pos = offset + i;
- /* that little bugger belongs to csched. No idea
- * what it's doing here. */
- if (i == 0)
- xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
- if (units & (1 << i))
- nv50_graph_construct_xfer_mpc(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
- }
- } else {
- /* Strand 0: TPs 0, 1 */
- ctx->ctxvals_pos = offset;
- /* that little bugger belongs to csched. No idea
- * what it's doing here. */
- xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */
- if (units & (1 << 0))
- nv50_graph_construct_xfer_mpc(ctx);
- if (units & (1 << 1))
- nv50_graph_construct_xfer_mpc(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 1: TPs 2, 3 */
- ctx->ctxvals_pos = offset + 1;
- if (units & (1 << 2))
- nv50_graph_construct_xfer_mpc(ctx);
- if (units & (1 << 3))
- nv50_graph_construct_xfer_mpc(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 2: TPs 4, 5, 6 */
- ctx->ctxvals_pos = offset + 2;
- if (units & (1 << 4))
- nv50_graph_construct_xfer_mpc(ctx);
- if (units & (1 << 5))
- nv50_graph_construct_xfer_mpc(ctx);
- if (units & (1 << 6))
- nv50_graph_construct_xfer_mpc(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
-
- /* Strand 3: TPs 7, 8, 9 */
- ctx->ctxvals_pos = offset + 3;
- if (units & (1 << 7))
- nv50_graph_construct_xfer_mpc(ctx);
- if (units & (1 << 8))
- nv50_graph_construct_xfer_mpc(ctx);
- if (units & (1 << 9))
- nv50_graph_construct_xfer_mpc(ctx);
- if ((ctx->ctxvals_pos-offset)/8 > size)
- size = (ctx->ctxvals_pos-offset)/8;
- }
- ctx->ctxvals_pos = offset + size * 8;
- ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
- cp_lsr (ctx, offset);
- cp_out (ctx, CP_SET_XFER_POINTER);
- cp_lsr (ctx, size);
- cp_out (ctx, CP_SEEK_2);
- cp_out (ctx, CP_XFER_2);
- cp_wait(ctx, XFER, BUSY);
-}
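The xfer2 builder above rounds the running context-value position up to a 64-entry boundary and then emits one strand per low offset, recovering the per-strand size by dividing the advance by eight. Below is a minimal sketch of that indexing arithmetic, under the assumption (suggested by the division by eight above) that each value emitted for a strand sits eight entries past the previous one; the helper names are illustrative, not the driver's own:

```c
#include <stdint.h>

/* Round a context-value index up to the next 64-entry strand boundary,
 * mirroring "(ctx->ctxvals_pos + 0x3f) & ~0x3f" in the builder above. */
static uint32_t strand_align(uint32_t pos)
{
	return (pos + 0x3f) & ~0x3f;
}

/* Assumed layout: strand 's' starts at the aligned base plus 's', and its
 * n-th value lies eight entries further on for every step of 'n'. */
static uint32_t strand_entry(uint32_t base, uint32_t strand, uint32_t n)
{
	return strand_align(base) + strand + n * 8;
}
```

With base = 0x27 and strand = 2, for instance, the first value would land at entry 0x42 and the next at 0x4a.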
+++ /dev/null
-/*
- * Copyright (C) 2007 Ben Skeggs.
- *
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
-
-#define BAR1_VM_BASE 0x0020000000ULL
-#define BAR1_VM_SIZE pci_resource_len(dev->pdev, 1)
-#define BAR3_VM_BASE 0x0000000000ULL
-#define BAR3_VM_SIZE pci_resource_len(dev->pdev, 3)
-
-struct nv50_instmem_priv {
- uint32_t save1700[5]; /* 0x1700->0x1710 */
-
- struct nouveau_gpuobj *bar1_dmaobj;
- struct nouveau_gpuobj *bar3_dmaobj;
-};
-
-static void
-nv50_channel_del(struct nouveau_channel **pchan)
-{
- struct nouveau_channel *chan;
-
- chan = *pchan;
- *pchan = NULL;
- if (!chan)
- return;
-
- nouveau_gpuobj_ref(NULL, &chan->ramfc);
- nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
- nouveau_gpuobj_ref(NULL, &chan->vm_pd);
- if (drm_mm_initialized(&chan->ramin_heap))
- drm_mm_takedown(&chan->ramin_heap);
- nouveau_gpuobj_ref(NULL, &chan->ramin);
- kfree(chan);
-}
-
-static int
-nv50_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
- struct nouveau_channel **pchan)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 pgd = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
- u32 fc = (dev_priv->chipset == 0x50) ? 0x0000 : 0x4200;
- struct nouveau_channel *chan;
- int ret, i;
-
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
- chan->dev = dev;
-
- ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
- if (ret) {
- nv50_channel_del(&chan);
- return ret;
- }
-
- ret = drm_mm_init(&chan->ramin_heap, 0x6000, chan->ramin->size - 0x6000);
- if (ret) {
- nv50_channel_del(&chan);
- return ret;
- }
-
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
- chan->ramin->pinst + pgd,
- chan->ramin->vinst + pgd,
- 0x4000, NVOBJ_FLAG_ZERO_ALLOC,
- &chan->vm_pd);
- if (ret) {
- nv50_channel_del(&chan);
- return ret;
- }
-
- for (i = 0; i < 0x4000; i += 8) {
- nv_wo32(chan->vm_pd, i + 0, 0x00000000);
- nv_wo32(chan->vm_pd, i + 4, 0xdeadcafe);
- }
-
- ret = nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
- if (ret) {
- nv50_channel_del(&chan);
- return ret;
- }
-
- ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst == ~0 ? ~0 :
- chan->ramin->pinst + fc,
- chan->ramin->vinst + fc, 0x100,
- NVOBJ_FLAG_ZERO_ALLOC, &chan->ramfc);
- if (ret) {
- nv50_channel_del(&chan);
- return ret;
- }
-
- *pchan = chan;
- return 0;
-}
-
-int
-nv50_instmem_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv;
- struct nouveau_channel *chan;
- struct nouveau_vm *vm;
- int ret, i;
- u32 tmp;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- dev_priv->engine.instmem.priv = priv;
-
- /* Save state, will restore at takedown. */
- for (i = 0x1700; i <= 0x1710; i += 4)
- priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
-
- /* Global PRAMIN heap */
- ret = drm_mm_init(&dev_priv->ramin_heap, 0, dev_priv->ramin_size);
- if (ret) {
- NV_ERROR(dev, "Failed to init RAMIN heap\n");
- goto error;
- }
-
- /* BAR3 */
- ret = nouveau_vm_new(dev, BAR3_VM_BASE, BAR3_VM_SIZE, BAR3_VM_BASE,
- &dev_priv->bar3_vm);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, (BAR3_VM_SIZE >> 12) * 8,
- 0x1000, NVOBJ_FLAG_DONT_MAP |
- NVOBJ_FLAG_ZERO_ALLOC,
- &dev_priv->bar3_vm->pgt[0].obj[0]);
- if (ret)
- goto error;
- dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
-
- nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
-
- ret = nv50_channel_new(dev, 128 * 1024, dev_priv->bar3_vm, &chan);
- if (ret)
- goto error;
- dev_priv->channels.ptr[0] = dev_priv->channels.ptr[127] = chan;
-
- ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR3_VM_BASE, BAR3_VM_SIZE,
- NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
- NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
- &priv->bar3_dmaobj);
- if (ret)
- goto error;
-
- nv_wr32(dev, 0x001704, 0x00000000 | (chan->ramin->vinst >> 12));
- nv_wr32(dev, 0x001704, 0x40000000 | (chan->ramin->vinst >> 12));
- nv_wr32(dev, 0x00170c, 0x80000000 | (priv->bar3_dmaobj->cinst >> 4));
-
- dev_priv->engine.instmem.flush(dev);
- dev_priv->ramin_available = true;
-
- tmp = nv_ro32(chan->ramin, 0);
- nv_wo32(chan->ramin, 0, ~tmp);
- if (nv_ro32(chan->ramin, 0) != ~tmp) {
- NV_ERROR(dev, "PRAMIN readback failed\n");
- ret = -EIO;
- goto error;
- }
- nv_wo32(chan->ramin, 0, tmp);
-
- /* BAR1 */
- ret = nouveau_vm_new(dev, BAR1_VM_BASE, BAR1_VM_SIZE, BAR1_VM_BASE, &vm);
- if (ret)
- goto error;
-
- ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, chan->vm_pd);
- if (ret)
- goto error;
- nouveau_vm_ref(NULL, &vm, NULL);
-
- ret = nv50_gpuobj_dma_new(chan, 0x0000, BAR1_VM_BASE, BAR1_VM_SIZE,
- NV_MEM_TARGET_VM, NV_MEM_ACCESS_VM,
- NV_MEM_TYPE_VM, NV_MEM_COMP_VM,
- &priv->bar1_dmaobj);
- if (ret)
- goto error;
-
- nv_wr32(dev, 0x001708, 0x80000000 | (priv->bar1_dmaobj->cinst >> 4));
- for (i = 0; i < 8; i++)
- nv_wr32(dev, 0x1900 + (i*4), 0);
-
- /* Create shared channel VM, space is reserved at the beginning
- * to catch "NULL pointer" references
- */
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0020000000ULL,
- &dev_priv->chan_vm);
- if (ret)
- return ret;
-
- return 0;
-
-error:
- nv50_instmem_takedown(dev);
- return ret;
-}
-
-void
-nv50_instmem_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_channel *chan = dev_priv->channels.ptr[0];
- int i;
-
- NV_DEBUG(dev, "\n");
-
- if (!priv)
- return;
-
- dev_priv->ramin_available = false;
-
- nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
-
- for (i = 0x1700; i <= 0x1710; i += 4)
- nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);
-
- nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
- nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);
-
- nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
- dev_priv->channels.ptr[127] = 0;
- nv50_channel_del(&dev_priv->channels.ptr[0]);
-
- nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
- nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
-
- if (drm_mm_initialized(&dev_priv->ramin_heap))
- drm_mm_takedown(&dev_priv->ramin_heap);
-
- dev_priv->engine.instmem.priv = NULL;
- kfree(priv);
-}
-
-int
-nv50_instmem_suspend(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- dev_priv->ramin_available = false;
- return 0;
-}
-
-void
-nv50_instmem_resume(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_channel *chan = dev_priv->channels.ptr[0];
- int i;
-
- /* Poke the relevant regs, and pray it works :) */
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12));
- nv_wr32(dev, NV50_PUNK_UNK1710, 0);
- nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->vinst >> 12) |
- NV50_PUNK_BAR_CFG_BASE_VALID);
- nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->bar1_dmaobj->cinst >> 4) |
- NV50_PUNK_BAR1_CTXDMA_VALID);
- nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->bar3_dmaobj->cinst >> 4) |
- NV50_PUNK_BAR3_CTXDMA_VALID);
-
- for (i = 0; i < 8; i++)
- nv_wr32(dev, 0x1900 + (i*4), 0);
-
- dev_priv->ramin_available = true;
-}
-
-struct nv50_gpuobj_node {
- struct nouveau_mem *vram;
- struct nouveau_vma chan_vma;
- u32 align;
-};
-
-int
-nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
- u32 size, u32 align)
-{
- struct drm_device *dev = gpuobj->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
- struct nv50_gpuobj_node *node = NULL;
- int ret;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
- node->align = align;
-
- size = (size + 4095) & ~4095;
- align = max(align, (u32)4096);
-
- ret = vram->get(dev, size, align, 0, 0, &node->vram);
- if (ret) {
- kfree(node);
- return ret;
- }
-
- gpuobj->vinst = node->vram->offset;
-
- if (gpuobj->flags & NVOBJ_FLAG_VM) {
- u32 flags = NV_MEM_ACCESS_RW;
- if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
- flags |= NV_MEM_ACCESS_SYS;
-
- ret = nouveau_vm_get(chan->vm, size, 12, flags,
- &node->chan_vma);
- if (ret) {
- vram->put(dev, &node->vram);
- kfree(node);
- return ret;
- }
-
- nouveau_vm_map(&node->chan_vma, node->vram);
- gpuobj->linst = node->chan_vma.offset;
- }
-
- gpuobj->size = size;
- gpuobj->node = node;
- return 0;
-}
-
-void
-nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
-{
- struct drm_device *dev = gpuobj->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
- struct nv50_gpuobj_node *node;
-
- node = gpuobj->node;
- gpuobj->node = NULL;
-
- if (node->chan_vma.node) {
- nouveau_vm_unmap(&node->chan_vma);
- nouveau_vm_put(&node->chan_vma);
- }
- vram->put(dev, &node->vram);
- kfree(node);
-}
-
-int
-nv50_instmem_map(struct nouveau_gpuobj *gpuobj)
-{
- struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
- struct nv50_gpuobj_node *node = gpuobj->node;
- int ret;
-
- ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12,
- NV_MEM_ACCESS_RW, &node->vram->bar_vma);
- if (ret)
- return ret;
-
- nouveau_vm_map(&node->vram->bar_vma, node->vram);
- gpuobj->pinst = node->vram->bar_vma.offset;
- return 0;
-}
-
-void
-nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
-{
- struct nv50_gpuobj_node *node = gpuobj->node;
-
- if (node->vram->bar_vma.node) {
- nouveau_vm_unmap(&node->vram->bar_vma);
- nouveau_vm_put(&node->vram->bar_vma);
- }
-}
-
-void
-nv50_instmem_flush(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->vm_lock, flags);
- nv_wr32(dev, 0x00330c, 0x00000001);
- if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
- NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
-}
-
-void
-nv84_instmem_flush(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->vm_lock, flags);
- nv_wr32(dev, 0x070000, 0x00000001);
- if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
- NV_ERROR(dev, "PRAMIN flush timeout\n");
- spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
-}
-
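Both PRAMIN flush variants above share one shape: take the VM spinlock, write the trigger bit into the flush register, then poll that same register until its busy bit drops, reporting a timeout otherwise. A hedged, generic sketch of the poll-until-clear part follows; the accessors and the retry bound are placeholders, not the driver's own API:

```c
#include <stdbool.h>
#include <stdint.h>

/* Placeholder MMIO accessors standing in for the driver's nv_rd32/nv_wr32. */
extern uint32_t mmio_read(uint32_t reg);
extern void mmio_write(uint32_t reg, uint32_t val);

/* Spin until (reg & mask) == value or the retry budget runs out; this is
 * the role nv_wait() plays in the flush routines above. */
static bool poll_until(uint32_t reg, uint32_t mask, uint32_t value, long tries)
{
	while (tries-- > 0) {
		if ((mmio_read(reg) & mask) == value)
			return true;
	}
	return false;
}

/* Flush shape used above: kick bit 0, then wait for busy bit 1 to clear. */
static int pramin_flush(uint32_t flush_reg)
{
	mmio_write(flush_reg, 0x00000001);
	if (!poll_until(flush_reg, 0x00000002, 0x00000000, 1000000))
		return -1; /* flush timeout, as NV_ERROR reports above */
	return 0;
}
```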
+++ /dev/null
-/*
- * Copyright (C) 2007 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-
-int
-nv50_mc_init(struct drm_device *dev)
-{
- nv_wr32(dev, NV03_PMC_ENABLE, 0xFFFFFFFF);
- return 0;
-}
-
-void nv50_mc_takedown(struct drm_device *dev)
-{
-}
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
-
-struct nv50_mpeg_engine {
- struct nouveau_exec_engine base;
-};
-
-static inline u32
-CTX_PTR(struct drm_device *dev, u32 offset)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (dev_priv->chipset == 0x50)
- offset += 0x0260;
- else
- offset += 0x0060;
-
- return offset;
-}
-
-static int
-nv50_mpeg_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramin = chan->ramin;
- struct nouveau_gpuobj *ctx = NULL;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- ret = nouveau_gpuobj_new(dev, chan, 128 * 4, 0, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ctx);
- if (ret)
- return ret;
-
- nv_wo32(ramin, CTX_PTR(dev, 0x00), 0x80190002);
- nv_wo32(ramin, CTX_PTR(dev, 0x04), ctx->vinst + ctx->size - 1);
- nv_wo32(ramin, CTX_PTR(dev, 0x08), ctx->vinst);
- nv_wo32(ramin, CTX_PTR(dev, 0x0c), 0);
- nv_wo32(ramin, CTX_PTR(dev, 0x10), 0);
- nv_wo32(ramin, CTX_PTR(dev, 0x14), 0x00010000);
-
- nv_wo32(ctx, 0x70, 0x00801ec1);
- nv_wo32(ctx, 0x7c, 0x0000037c);
- dev_priv->engine.instmem.flush(dev);
-
- chan->engctx[engine] = ctx;
- return 0;
-}
-
-static void
-nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- int i;
-
- for (i = 0x00; i <= 0x14; i += 4)
- nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
-
- nouveau_gpuobj_ref(NULL, &ctx);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nv50_mpeg_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
- if (ret)
- return ret;
- obj->engine = 2;
- obj->class = class;
-
- nv_wo32(obj, 0x00, class);
- nv_wo32(obj, 0x04, 0x00000000);
- nv_wo32(obj, 0x08, 0x00000000);
- nv_wo32(obj, 0x0c, 0x00000000);
- dev_priv->engine.instmem.flush(dev);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static void
-nv50_mpeg_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 0x08);
-}
-
-static int
-nv50_mpeg_init(struct drm_device *dev, int engine)
-{
- nv_wr32(dev, 0x00b32c, 0x00000000);
- nv_wr32(dev, 0x00b314, 0x00000100);
- nv_wr32(dev, 0x00b0e0, 0x0000001a);
-
- nv_wr32(dev, 0x00b220, 0x00000044);
- nv_wr32(dev, 0x00b300, 0x00801ec1);
- nv_wr32(dev, 0x00b390, 0x00000000);
- nv_wr32(dev, 0x00b394, 0x00000000);
- nv_wr32(dev, 0x00b398, 0x00000000);
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
-
- nv_wr32(dev, 0x00b100, 0xffffffff);
- nv_wr32(dev, 0x00b140, 0xffffffff);
-
- if (!nv_wait(dev, 0x00b200, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "PMPEG init: 0x%08x\n", nv_rd32(dev, 0x00b200));
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-nv50_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x00b140, 0x00000000);
- return 0;
-}
-
-static void
-nv50_mpeg_isr(struct drm_device *dev)
-{
- u32 stat = nv_rd32(dev, 0x00b100);
- u32 type = nv_rd32(dev, 0x00b230);
- u32 mthd = nv_rd32(dev, 0x00b234);
- u32 data = nv_rd32(dev, 0x00b238);
- u32 show = stat;
-
- if (stat & 0x01000000) {
- /* happens on initial binding of the object */
- if (type == 0x00000020 && mthd == 0x0000) {
- nv_wr32(dev, 0x00b308, 0x00000100);
- show &= ~0x01000000;
- }
- }
-
- if (show && nouveau_ratelimit()) {
- NV_INFO(dev, "PMPEG - 0x%08x 0x%08x 0x%08x 0x%08x\n",
- stat, type, mthd, data);
- }
-
- nv_wr32(dev, 0x00b100, stat);
- nv_wr32(dev, 0x00b230, 0x00000001);
- nv50_fb_vm_trap(dev, 1);
-}
-
-static void
-nv50_vpe_isr(struct drm_device *dev)
-{
- if (nv_rd32(dev, 0x00b100))
- nv50_mpeg_isr(dev);
-
- if (nv_rd32(dev, 0x00b800)) {
- u32 stat = nv_rd32(dev, 0x00b800);
- NV_INFO(dev, "PMSRCH: 0x%08x\n", stat);
- nv_wr32(dev, 0xb800, stat);
- }
-}
-
-static void
-nv50_mpeg_destroy(struct drm_device *dev, int engine)
-{
- struct nv50_mpeg_engine *pmpeg = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 0);
-
- NVOBJ_ENGINE_DEL(dev, MPEG);
- kfree(pmpeg);
-}
-
-int
-nv50_mpeg_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv50_mpeg_engine *pmpeg;
-
- pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
- if (!pmpeg)
- return -ENOMEM;
-
- pmpeg->base.destroy = nv50_mpeg_destroy;
- pmpeg->base.init = nv50_mpeg_init;
- pmpeg->base.fini = nv50_mpeg_fini;
- pmpeg->base.context_new = nv50_mpeg_context_new;
- pmpeg->base.context_del = nv50_mpeg_context_del;
- pmpeg->base.object_new = nv50_mpeg_object_new;
- pmpeg->base.tlb_flush = nv50_mpeg_tlb_flush;
-
- if (dev_priv->chipset == 0x50) {
- nouveau_irq_register(dev, 0, nv50_vpe_isr);
- NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
- NVOBJ_CLASS(dev, 0x3174, MPEG);
-#if 0
- NVOBJ_ENGINE_ADD(dev, ME, &pme->base);
- NVOBJ_CLASS(dev, 0x4075, ME);
-#endif
- } else {
- nouveau_irq_register(dev, 0, nv50_mpeg_isr);
- NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
- NVOBJ_CLASS(dev, 0x8274, MPEG);
- }
-
- return 0;
-
-}
#include "drmP.h"
#include "nouveau_drv.h"
-#include "nouveau_bios.h"
+#include <nouveau_bios.h>
#include "nouveau_hw.h"
#include "nouveau_pm.h"
#include "nouveau_hwsq.h"
#include "drmP.h"
#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_software.h"
#include "nv50_display.h"
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
-
-void
-nv50_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 pde,
- struct nouveau_gpuobj *pgt[2])
-{
- u64 phys = 0xdeadcafe00000000ULL;
- u32 coverage = 0;
-
- if (pgt[0]) {
- phys = 0x00000003 | pgt[0]->vinst; /* present, 4KiB pages */
- coverage = (pgt[0]->size >> 3) << 12;
- } else
- if (pgt[1]) {
- phys = 0x00000001 | pgt[1]->vinst; /* present */
- coverage = (pgt[1]->size >> 3) << 16;
- }
-
- if (phys & 1) {
- if (coverage <= 32 * 1024 * 1024)
- phys |= 0x60;
- else if (coverage <= 64 * 1024 * 1024)
- phys |= 0x40;
- else if (coverage <= 128 * 1024 * 1024)
- phys |= 0x20;
- }
-
- nv_wo32(pgd, (pde * 8) + 0, lower_32_bits(phys));
- nv_wo32(pgd, (pde * 8) + 4, upper_32_bits(phys));
-}
-
-static inline u64
-vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
-{
- phys |= 1; /* present */
- phys |= (u64)memtype << 40;
- phys |= target << 4;
- if (vma->access & NV_MEM_ACCESS_SYS)
- phys |= (1 << 6);
- if (!(vma->access & NV_MEM_ACCESS_WO))
- phys |= (1 << 3);
- return phys;
-}
-
-void
-nv50_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
-{
- struct drm_nouveau_private *dev_priv = vma->vm->dev->dev_private;
- u32 comp = (mem->memtype & 0x180) >> 7;
- u32 block, target;
- int i;
-
- /* IGPs don't have real VRAM, re-target to stolen system memory */
- target = 0;
- if (dev_priv->vram_sys_base) {
- phys += dev_priv->vram_sys_base;
- target = 3;
- }
-
- phys = vm_addr(vma, phys, mem->memtype, target);
- pte <<= 3;
- cnt <<= 3;
-
- while (cnt) {
- u32 offset_h = upper_32_bits(phys);
- u32 offset_l = lower_32_bits(phys);
-
- for (i = 7; i >= 0; i--) {
- block = 1 << (i + 3);
- if (cnt >= block && !(pte & (block - 1)))
- break;
- }
- offset_l |= (i << 7);
-
- phys += block << (vma->node->type - 3);
- cnt -= block;
- if (comp) {
- u32 tag = mem->tag->start + ((delta >> 16) * comp);
- offset_h |= (tag << 17);
- delta += block << (vma->node->type - 3);
- }
-
- while (block) {
- nv_wo32(pgt, pte + 0, offset_l);
- nv_wo32(pgt, pte + 4, offset_h);
- pte += 8;
- block -= 8;
- }
- }
-}
-
-void
-nv50_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
- pte <<= 3;
- while (cnt--) {
- u64 phys = vm_addr(vma, (u64)*list++, mem->memtype, target);
- nv_wo32(pgt, pte + 0, lower_32_bits(phys));
- nv_wo32(pgt, pte + 4, upper_32_bits(phys));
- pte += 8;
- }
-}
-
-void
-nv50_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
-{
- pte <<= 3;
- while (cnt--) {
- nv_wo32(pgt, pte + 0, 0x00000000);
- nv_wo32(pgt, pte + 4, 0x00000000);
- pte += 8;
- }
-}
-
-void
-nv50_vm_flush(struct nouveau_vm *vm)
-{
- struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- int i;
-
- pinstmem->flush(vm->dev);
-
- /* BAR */
- if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm) {
- nv50_vm_flush_engine(vm->dev, 6);
- return;
- }
-
- for (i = 0; i < NVOBJ_ENGINE_NR; i++) {
- if (atomic_read(&vm->engref[i]))
- dev_priv->eng[i]->tlb_flush(vm->dev, i);
- }
-}
-
-void
-nv50_vm_flush_engine(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned long flags;
-
- spin_lock_irqsave(&dev_priv->vm_lock, flags);
- nv_wr32(dev, 0x100c80, (engine << 16) | 1);
- if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
- NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
- spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
-}
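For reference, the page-table entries written by the map routines above are assembled by vm_addr(): bit 0 marks the page present, bit 3 makes it read-only when write access is not granted, bit 6 is set for mappings that allow system access, bits 4-5 select the target aperture, and the memory type is shifted into bits 40 and up. A small, self-contained restatement of that packing (the function name and flag parameters are illustrative):

```c
#include <stdint.h>

/* Pack an NV50-style PTE the way vm_addr() above does; 'phys' is assumed
 * to be page-aligned so the low flag bits are free. */
static uint64_t nv50_pte_pack(uint64_t phys, uint32_t memtype, uint32_t target,
			      int sys_access, int read_only)
{
	uint64_t pte = phys;

	pte |= 1;                        /* bit 0: present            */
	pte |= (uint64_t)memtype << 40;  /* storage/compression type  */
	pte |= (uint64_t)target << 4;    /* bits 4-5: target aperture */
	if (sys_access)
		pte |= 1 << 6;           /* system access allowed     */
	if (read_only)
		pte |= 1 << 3;           /* write access not granted  */
	return pte;
}
```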
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-
-static int types[0x80] = {
- 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 0, 0, 0, 0, 2, 2, 2, 2, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 0, 2, 2, 2, 2, 2, 2, 2, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 0, 0,
- 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 2, 2, 2, 2,
- 1, 0, 2, 0, 1, 0, 2, 0, 1, 1, 2, 2, 1, 1, 0, 0
-};
-
-bool
-nv50_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
-{
- int type = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
-
- if (likely(type < ARRAY_SIZE(types) && types[type]))
- return true;
- return false;
-}
-
-void
-nv50_vram_del(struct drm_device *dev, struct nouveau_mem **pmem)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
- struct nouveau_mm_node *this;
- struct nouveau_mem *mem;
-
- mem = *pmem;
- *pmem = NULL;
- if (unlikely(mem == NULL))
- return;
-
- mutex_lock(&mm->mutex);
- while (!list_empty(&mem->regions)) {
- this = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
-
- list_del(&this->rl_entry);
- nouveau_mm_put(mm, this);
- }
-
- if (mem->tag) {
- drm_mm_put_block(mem->tag);
- mem->tag = NULL;
- }
- mutex_unlock(&mm->mutex);
-
- kfree(mem);
-}
-
-int
-nv50_vram_new(struct drm_device *dev, u64 size, u32 align, u32 size_nc,
- u32 memtype, struct nouveau_mem **pmem)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
- struct nouveau_mm_node *r;
- struct nouveau_mem *mem;
- int comp = (memtype & 0x300) >> 8;
- int type = (memtype & 0x07f);
- int ret;
-
- if (!types[type])
- return -EINVAL;
- size >>= 12;
- align >>= 12;
- size_nc >>= 12;
-
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- mutex_lock(&mm->mutex);
- if (comp) {
- if (align == 16) {
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- int n = (size >> 4) * comp;
-
- mem->tag = drm_mm_search_free(&pfb->tag_heap, n, 0, 0);
- if (mem->tag)
- mem->tag = drm_mm_get_block(mem->tag, n, 0);
- }
-
- if (unlikely(!mem->tag))
- comp = 0;
- }
-
- INIT_LIST_HEAD(&mem->regions);
- mem->dev = dev_priv->dev;
- mem->memtype = (comp << 7) | type;
- mem->size = size;
-
- do {
- ret = nouveau_mm_get(mm, types[type], size, size_nc, align, &r);
- if (ret) {
- mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &mem);
- return ret;
- }
-
- list_add_tail(&r->rl_entry, &mem->regions);
- size -= r->length;
- } while (size);
- mutex_unlock(&mm->mutex);
-
- r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
- mem->offset = (u64)r->offset << 12;
- *pmem = mem;
- return 0;
-}
-
-static u32
-nv50_vram_rblock(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i, parts, colbits, rowbitsa, rowbitsb, banks;
- u64 rowsize, predicted;
- u32 r0, r4, rt, ru, rblock_size;
-
- r0 = nv_rd32(dev, 0x100200);
- r4 = nv_rd32(dev, 0x100204);
- rt = nv_rd32(dev, 0x100250);
- ru = nv_rd32(dev, 0x001540);
- NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
-
- for (i = 0, parts = 0; i < 8; i++) {
- if (ru & (0x00010000 << i))
- parts++;
- }
-
- colbits = (r4 & 0x0000f000) >> 12;
- rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
- rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
- banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
-
- rowsize = parts * banks * (1 << colbits) * 8;
- predicted = rowsize << rowbitsa;
- if (r0 & 0x00000004)
- predicted += rowsize << rowbitsb;
-
- if (predicted != dev_priv->vram_size) {
- NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
- (u32)(dev_priv->vram_size >> 20));
- NV_WARN(dev, "we calculated %dMiB VRAM\n",
- (u32)(predicted >> 20));
- }
-
- rblock_size = rowsize;
- if (rt & 1)
- rblock_size *= 3;
-
- NV_DEBUG(dev, "rblock %d bytes\n", rblock_size);
- return rblock_size;
-}
-
-int
-nv50_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 pfb714 = nv_rd32(dev, 0x100714);
- u32 rblock, length;
-
- switch (pfb714 & 0x00000007) {
- case 0: dev_priv->vram_type = NV_MEM_TYPE_DDR1; break;
- case 1:
- if (nouveau_mem_vbios_type(dev) == NV_MEM_TYPE_DDR3)
- dev_priv->vram_type = NV_MEM_TYPE_DDR3;
- else
- dev_priv->vram_type = NV_MEM_TYPE_DDR2;
- break;
- case 2: dev_priv->vram_type = NV_MEM_TYPE_GDDR3; break;
- case 3: dev_priv->vram_type = NV_MEM_TYPE_GDDR4; break;
- case 4: dev_priv->vram_type = NV_MEM_TYPE_GDDR5; break;
- default:
- break;
- }
-
- dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x100200) & 0x4);
- dev_priv->vram_size = nv_rd32(dev, 0x10020c);
- dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
- dev_priv->vram_size &= 0xffffffff00ULL;
-
- /* IGPs, no funky reordering happens here, they don't have VRAM */
- if (dev_priv->chipset == 0xaa ||
- dev_priv->chipset == 0xac ||
- dev_priv->chipset == 0xaf) {
- dev_priv->vram_sys_base = (u64)nv_rd32(dev, 0x100e10) << 12;
- rblock = 4096 >> 12;
- } else {
- rblock = nv50_vram_rblock(dev) >> 12;
- }
-
- length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
-
- return nouveau_mm_init(&vram->mm, rsvd_head, length, rblock);
-}
-
-void
-nv50_vram_fini(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
-
- nouveau_mm_fini(&vram->mm);
-}
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-/*XXX: This stub is currently also used on NV98+; as soon as it becomes
- * more than just an enable/disable stub, it needs to be split out to
- * nv98_bsp.c...
- */
-
-struct nv84_bsp_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nv84_bsp_fini(struct drm_device *dev, int engine, bool suspend)
-{
- if (!(nv_rd32(dev, 0x000200) & 0x00008000))
- return 0;
-
- nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
- return 0;
-}
-
-static int
-nv84_bsp_init(struct drm_device *dev, int engine)
-{
- nv_mask(dev, 0x000200, 0x00008000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00008000, 0x00008000);
- return 0;
-}
-
-static void
-nv84_bsp_destroy(struct drm_device *dev, int engine)
-{
- struct nv84_bsp_engine *pbsp = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, BSP);
-
- kfree(pbsp);
-}
-
-int
-nv84_bsp_create(struct drm_device *dev)
-{
- struct nv84_bsp_engine *pbsp;
-
- pbsp = kzalloc(sizeof(*pbsp), GFP_KERNEL);
- if (!pbsp)
- return -ENOMEM;
-
- pbsp->base.destroy = nv84_bsp_destroy;
- pbsp->base.init = nv84_bsp_init;
- pbsp->base.fini = nv84_bsp_fini;
-
- NVOBJ_ENGINE_ADD(dev, BSP, &pbsp->base);
- return 0;
-}
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-struct nv84_crypt_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nv84_crypt_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramin = chan->ramin;
- struct nouveau_gpuobj *ctx;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ctx);
- if (ret)
- return ret;
-
- nv_wo32(ramin, 0xa0, 0x00190000);
- nv_wo32(ramin, 0xa4, ctx->vinst + ctx->size - 1);
- nv_wo32(ramin, 0xa8, ctx->vinst);
- nv_wo32(ramin, 0xac, 0);
- nv_wo32(ramin, 0xb0, 0);
- nv_wo32(ramin, 0xb4, 0);
- dev_priv->engine.instmem.flush(dev);
-
- atomic_inc(&chan->vm->engref[engine]);
- chan->engctx[engine] = ctx;
- return 0;
-}
-
-static void
-nv84_crypt_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- u32 inst;
-
- inst = (chan->ramin->vinst >> 12);
- inst |= 0x80000000;
-
- /* mark the context as invalid if it is still on the hardware;
- * not doing this causes issues the next time PCRYPT is used,
- * unsurprisingly :)
- */
- nv_wr32(dev, 0x10200c, 0x00000000);
- if (nv_rd32(dev, 0x102188) == inst)
- nv_mask(dev, 0x102188, 0x80000000, 0x00000000);
- if (nv_rd32(dev, 0x10218c) == inst)
- nv_mask(dev, 0x10218c, 0x80000000, 0x00000000);
- nv_wr32(dev, 0x10200c, 0x00000010);
-
- nouveau_gpuobj_ref(NULL, &ctx);
-
- atomic_dec(&chan->vm->engref[engine]);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nv84_crypt_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *obj = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
- if (ret)
- return ret;
- obj->engine = 5;
- obj->class = class;
-
- nv_wo32(obj, 0x00, class);
- dev_priv->engine.instmem.flush(dev);
-
- ret = nouveau_ramht_insert(chan, handle, obj);
- nouveau_gpuobj_ref(NULL, &obj);
- return ret;
-}
-
-static void
-nv84_crypt_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 0x0a);
-}
-
-static struct nouveau_bitfield nv84_crypt_intr[] = {
- { 0x00000001, "INVALID_STATE" },
- { 0x00000002, "ILLEGAL_MTHD" },
- { 0x00000004, "ILLEGAL_CLASS" },
- { 0x00000080, "QUERY" },
- { 0x00000100, "FAULT" },
- {}
-};
-
-static void
-nv84_crypt_isr(struct drm_device *dev)
-{
- u32 stat = nv_rd32(dev, 0x102130);
- u32 mthd = nv_rd32(dev, 0x102190);
- u32 data = nv_rd32(dev, 0x102194);
- u64 inst = (u64)(nv_rd32(dev, 0x102188) & 0x7fffffff) << 12;
- int show = nouveau_ratelimit();
- int chid = nv50_graph_isr_chid(dev, inst);
-
- if (show) {
- NV_INFO(dev, "PCRYPT:");
- nouveau_bitfield_print(nv84_crypt_intr, stat);
- printk(KERN_CONT " ch %d (0x%010llx) mthd 0x%04x data 0x%08x\n",
- chid, inst, mthd, data);
- }
-
- nv_wr32(dev, 0x102130, stat);
- nv_wr32(dev, 0x10200c, 0x10);
-
- nv50_fb_vm_trap(dev, show);
-}
-
-static int
-nv84_crypt_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_wr32(dev, 0x102140, 0x00000000);
- return 0;
-}
-
-static int
-nv84_crypt_init(struct drm_device *dev, int engine)
-{
- nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
-
- nv_wr32(dev, 0x102130, 0xffffffff);
- nv_wr32(dev, 0x102140, 0xffffffbf);
-
- nv_wr32(dev, 0x10200c, 0x00000010);
- return 0;
-}
-
-static void
-nv84_crypt_destroy(struct drm_device *dev, int engine)
-{
- struct nv84_crypt_engine *pcrypt = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, CRYPT);
-
- nouveau_irq_unregister(dev, 14);
- kfree(pcrypt);
-}
-
-int
-nv84_crypt_create(struct drm_device *dev)
-{
- struct nv84_crypt_engine *pcrypt;
-
- pcrypt = kzalloc(sizeof(*pcrypt), GFP_KERNEL);
- if (!pcrypt)
- return -ENOMEM;
-
- pcrypt->base.destroy = nv84_crypt_destroy;
- pcrypt->base.init = nv84_crypt_init;
- pcrypt->base.fini = nv84_crypt_fini;
- pcrypt->base.context_new = nv84_crypt_context_new;
- pcrypt->base.context_del = nv84_crypt_context_del;
- pcrypt->base.object_new = nv84_crypt_object_new;
- pcrypt->base.tlb_flush = nv84_crypt_tlb_flush;
-
- nouveau_irq_register(dev, 14, nv84_crypt_isr);
-
- NVOBJ_ENGINE_ADD(dev, CRYPT, &pcrypt->base);
- NVOBJ_CLASS (dev, 0x74c1, CRYPT);
- return 0;
-}
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
#include "nouveau_fence.h"
struct nv84_fence_chan {
+++ /dev/null
-/*
- * Copyright (C) 2012 Ben Skeggs.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial
- * portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
- * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
-#include "nouveau_vm.h"
-
-struct nv84_fifo_priv {
- struct nouveau_fifo_priv base;
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
-};
-
-struct nv84_fifo_chan {
- struct nouveau_fifo_chan base;
- struct nouveau_gpuobj *ramfc;
- struct nouveau_gpuobj *cache;
-};
-
-static int
-nv84_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nv84_fifo_priv *priv = nv_engine(chan->dev, engine);
- struct nv84_fifo_chan *fctx;
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
- u64 instance;
- unsigned long flags;
- int ret;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
- atomic_inc(&chan->vm->engref[engine]);
-
- chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
- NV50_USER(chan->id), PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- ret = nouveau_gpuobj_new(dev, chan, 256, 256, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
- if (ret)
- goto error;
-
- instance = fctx->ramfc->vinst >> 8;
-
- ret = nouveau_gpuobj_new(dev, chan, 4096, 1024, 0, &fctx->cache);
- if (ret)
- goto error;
-
- nv_wo32(fctx->ramfc, 0x3c, 0x403f6078);
- nv_wo32(fctx->ramfc, 0x40, 0x00000000);
- nv_wo32(fctx->ramfc, 0x44, 0x01003fff);
- nv_wo32(fctx->ramfc, 0x48, chan->pushbuf->cinst >> 4);
- nv_wo32(fctx->ramfc, 0x50, lower_32_bits(ib_offset));
- nv_wo32(fctx->ramfc, 0x54, upper_32_bits(ib_offset) |
- drm_order(chan->dma.ib_max + 1) << 16);
- nv_wo32(fctx->ramfc, 0x60, 0x7fffffff);
- nv_wo32(fctx->ramfc, 0x78, 0x00000000);
- nv_wo32(fctx->ramfc, 0x7c, 0x30000001);
- nv_wo32(fctx->ramfc, 0x80, ((chan->ramht->bits - 9) << 27) |
- (4 << 24) /* SEARCH_FULL */ |
- (chan->ramht->gpuobj->cinst >> 4));
- nv_wo32(fctx->ramfc, 0x88, fctx->cache->vinst >> 10);
- nv_wo32(fctx->ramfc, 0x98, chan->ramin->vinst >> 12);
-
- nv_wo32(chan->ramin, 0x00, chan->id);
- nv_wo32(chan->ramin, 0x04, fctx->ramfc->vinst >> 8);
-
- dev_priv->engine.instmem.flush(dev);
-
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
- nv50_fifo_playlist_update(dev);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-static void
-nv84_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv84_fifo_chan *fctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- unsigned long flags;
- u32 save;
-
- /* remove channel from playlist, will context switch if active */
- spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
- nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
- nv50_fifo_playlist_update(dev);
-
- save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
-
- /* tell any engines on this channel to unload their contexts */
- nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
- if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff))
- NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
-
- nv_wr32(dev, 0x002520, save);
-
- nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
- spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
-
- /* clean up */
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
-
- nouveau_gpuobj_ref(NULL, &fctx->ramfc);
- nouveau_gpuobj_ref(NULL, &fctx->cache);
-
- atomic_dec(&chan->vm->engref[engine]);
- chan->engctx[engine] = NULL;
- kfree(fctx);
-}
-
-static int
-nv84_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv84_fifo_chan *fctx;
- u32 instance;
- int i;
-
- nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(dev, 0x00250c, 0x6f3cfc34);
- nv_wr32(dev, 0x002044, 0x01003fff);
-
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xffffffff);
-
- for (i = 0; i < 128; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (chan && (fctx = chan->engctx[engine]))
- instance = 0x80000000 | fctx->ramfc->vinst >> 8;
- else
- instance = 0x00000000;
- nv_wr32(dev, 0x002600 + (i * 4), instance);
- }
-
- nv50_fifo_playlist_update(dev);
-
- nv_wr32(dev, 0x003200, 1);
- nv_wr32(dev, 0x003250, 1);
- nv_wr32(dev, 0x002500, 1);
- return 0;
-}
-
-static int
-nv84_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv84_fifo_priv *priv = nv_engine(dev, engine);
- int i;
- u32 save;
-
- /* set playlist length to zero, fifo will unload context */
- nv_wr32(dev, 0x0032ec, 0);
-
- save = nv_mask(dev, 0x002520, 0x0000003f, 0x15);
-
- /* tell all connected engines to unload their contexts */
- for (i = 0; i < priv->base.channels; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (chan)
- nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
- if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
- NV_INFO(dev, "PFIFO: channel %d unload timeout\n", i);
- return -EBUSY;
- }
- }
-
- nv_wr32(dev, 0x002520, save);
- nv_wr32(dev, 0x002140, 0);
- return 0;
-}
-
-int
-nv84_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv84_fifo_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nv50_fifo_destroy;
- priv->base.base.init = nv84_fifo_init;
- priv->base.base.fini = nv84_fifo_fini;
- priv->base.base.context_new = nv84_fifo_context_new;
- priv->base.base.context_del = nv84_fifo_context_del;
- priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
- priv->base.channels = 127;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
- if (ret)
- goto error;
-
- nouveau_irq_register(dev, 8, nv04_fifo_isr);
-error:
- if (ret)
- priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
- return ret;
-}
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-/*XXX: This stub is currently also used on NV98+; as soon as it becomes
- * more than just an enable/disable stub, it needs to be split out to
- * nv98_vp.c...
- */
-
-struct nv84_vp_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nv84_vp_fini(struct drm_device *dev, int engine, bool suspend)
-{
- if (!(nv_rd32(dev, 0x000200) & 0x00020000))
- return 0;
-
- nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
- return 0;
-}
-
-static int
-nv84_vp_init(struct drm_device *dev, int engine)
-{
- nv_mask(dev, 0x000200, 0x00020000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00020000, 0x00020000);
- return 0;
-}
-
-static void
-nv84_vp_destroy(struct drm_device *dev, int engine)
-{
- struct nv84_vp_engine *pvp = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, VP);
-
- kfree(pvp);
-}
-
-int
-nv84_vp_create(struct drm_device *dev)
-{
- struct nv84_vp_engine *pvp;
-
- pvp = kzalloc(sizeof(*pvp), GFP_KERNEL);
- if (!pvp)
- return -ENOMEM;
-
- pvp->base.destroy = nv84_vp_destroy;
- pvp->base.init = nv84_vp_init;
- pvp->base.fini = nv84_vp_fini;
-
- NVOBJ_ENGINE_ADD(dev, VP, &pvp->base);
- return 0;
-}
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-#include "nv98_crypt.fuc.h"
-
-struct nv98_crypt_priv {
- struct nouveau_exec_engine base;
-};
-
-struct nv98_crypt_chan {
- struct nouveau_gpuobj *mem;
-};
-
-static int
-nv98_crypt_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nv98_crypt_priv *priv = nv_engine(dev, engine);
- struct nv98_crypt_chan *cctx;
- int ret;
-
- cctx = chan->engctx[engine] = kzalloc(sizeof(*cctx), GFP_KERNEL);
- if (!cctx)
- return -ENOMEM;
-
- atomic_inc(&chan->vm->engref[engine]);
-
- ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &cctx->mem);
- if (ret)
- goto error;
-
- nv_wo32(chan->ramin, 0xa0, 0x00190000);
- nv_wo32(chan->ramin, 0xa4, cctx->mem->vinst + cctx->mem->size - 1);
- nv_wo32(chan->ramin, 0xa8, cctx->mem->vinst);
- nv_wo32(chan->ramin, 0xac, 0x00000000);
- nv_wo32(chan->ramin, 0xb0, 0x00000000);
- nv_wo32(chan->ramin, 0xb4, 0x00000000);
- dev_priv->engine.instmem.flush(dev);
-
-error:
- if (ret)
- priv->base.context_del(chan, engine);
- return ret;
-}
-
-static void
-nv98_crypt_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nv98_crypt_chan *cctx = chan->engctx[engine];
- int i;
-
- for (i = 0xa0; i < 0xb4; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
-
- nouveau_gpuobj_ref(NULL, &cctx->mem);
-
- atomic_dec(&chan->vm->engref[engine]);
- chan->engctx[engine] = NULL;
- kfree(cctx);
-}
-
-static int
-nv98_crypt_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct nv98_crypt_chan *cctx = chan->engctx[engine];
-
- /* fuc engine doesn't need an object, our ramht code does.. */
- cctx->mem->engine = 5;
- cctx->mem->class = class;
- return nouveau_ramht_insert(chan, handle, cctx->mem);
-}
-
-static void
-nv98_crypt_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 0x0a);
-}
-
-static int
-nv98_crypt_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
- return 0;
-}
-
-static int
-nv98_crypt_init(struct drm_device *dev, int engine)
-{
- int i;
-
- /* reset! */
- nv_mask(dev, 0x000200, 0x00004000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00004000, 0x00004000);
-
- /* wait for exit interrupt to signal */
- nv_wait(dev, 0x087008, 0x00000010, 0x00000010);
- nv_wr32(dev, 0x087004, 0x00000010);
-
- /* upload microcode code and data segments */
- nv_wr32(dev, 0x087ff8, 0x00100000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_code); i++)
- nv_wr32(dev, 0x087ff4, nv98_pcrypt_code[i]);
-
- nv_wr32(dev, 0x087ff8, 0x00000000);
- for (i = 0; i < ARRAY_SIZE(nv98_pcrypt_data); i++)
- nv_wr32(dev, 0x087ff4, nv98_pcrypt_data[i]);
-
- /* start it running */
- nv_wr32(dev, 0x08710c, 0x00000000);
- nv_wr32(dev, 0x087104, 0x00000000); /* ENTRY */
- nv_wr32(dev, 0x087100, 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static struct nouveau_enum nv98_crypt_isr_error_name[] = {
- { 0x0000, "ILLEGAL_MTHD" },
- { 0x0001, "INVALID_BITFIELD" },
- { 0x0002, "INVALID_ENUM" },
- { 0x0003, "QUERY" },
- {}
-};
-
-static void
-nv98_crypt_isr(struct drm_device *dev)
-{
- u32 disp = nv_rd32(dev, 0x08701c);
- u32 stat = nv_rd32(dev, 0x087008) & disp & ~(disp >> 16);
- u32 inst = nv_rd32(dev, 0x087050) & 0x3fffffff;
- u32 ssta = nv_rd32(dev, 0x087040) & 0x0000ffff;
- u32 addr = nv_rd32(dev, 0x087040) >> 16;
- u32 mthd = (addr & 0x07ff) << 2;
- u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(dev, 0x087044);
- int chid = nv50_graph_isr_chid(dev, inst);
-
- if (stat & 0x00000040) {
- NV_INFO(dev, "PCRYPT: DISPATCH_ERROR [");
- nouveau_enum_print(nv98_crypt_isr_error_name, ssta);
- printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, mthd, data);
- nv_wr32(dev, 0x087004, 0x00000040);
- stat &= ~0x00000040;
- }
-
- if (stat) {
- NV_INFO(dev, "PCRYPT: unhandled intr 0x%08x\n", stat);
- nv_wr32(dev, 0x087004, stat);
- }
-
- nv50_fb_vm_trap(dev, 1);
-}
-
-static void
-nv98_crypt_destroy(struct drm_device *dev, int engine)
-{
- struct nv98_crypt_priv *priv = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 14);
- NVOBJ_ENGINE_DEL(dev, CRYPT);
- kfree(priv);
-}
-
-int
-nv98_crypt_create(struct drm_device *dev)
-{
- struct nv98_crypt_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.destroy = nv98_crypt_destroy;
- priv->base.init = nv98_crypt_init;
- priv->base.fini = nv98_crypt_fini;
- priv->base.context_new = nv98_crypt_context_new;
- priv->base.context_del = nv98_crypt_context_del;
- priv->base.object_new = nv98_crypt_object_new;
- priv->base.tlb_flush = nv98_crypt_tlb_flush;
-
- nouveau_irq_register(dev, 14, nv98_crypt_isr);
-
- NVOBJ_ENGINE_ADD(dev, CRYPT, &priv->base);
- NVOBJ_CLASS(dev, 0x88b4, CRYPT);
- return 0;
-}
+++ /dev/null
-/*
- * fuc microcode for nv98 pcrypt engine
- * Copyright (C) 2010 Marcin Kościelnicki
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-.section #nv98_pcrypt_data
-
-ctx_dma:
-ctx_dma_query: .b32 0
-ctx_dma_src: .b32 0
-ctx_dma_dst: .b32 0
-.equ #dma_count 3
-ctx_query_address_high: .b32 0
-ctx_query_address_low: .b32 0
-ctx_query_counter: .b32 0
-ctx_cond_address_high: .b32 0
-ctx_cond_address_low: .b32 0
-ctx_cond_off: .b32 0
-ctx_src_address_high: .b32 0
-ctx_src_address_low: .b32 0
-ctx_dst_address_high: .b32 0
-ctx_dst_address_low: .b32 0
-ctx_mode: .b32 0
-.align 16
-ctx_key: .skip 16
-ctx_iv: .skip 16
-
-.align 0x80
-swap:
-.skip 32
-
-.align 8
-common_cmd_dtable:
-.b32 #ctx_query_address_high + 0x20000 ~0xff
-.b32 #ctx_query_address_low + 0x20000 ~0xfffffff0
-.b32 #ctx_query_counter + 0x20000 ~0xffffffff
-.b32 #cmd_query_get + 0x00000 ~1
-.b32 #ctx_cond_address_high + 0x20000 ~0xff
-.b32 #ctx_cond_address_low + 0x20000 ~0xfffffff0
-.b32 #cmd_cond_mode + 0x00000 ~7
-.b32 #cmd_wrcache_flush + 0x00000 ~0
-.equ #common_cmd_max 0x88
-
-
-.align 8
-engine_cmd_dtable:
-.b32 #ctx_key + 0x0 + 0x20000 ~0xffffffff
-.b32 #ctx_key + 0x4 + 0x20000 ~0xffffffff
-.b32 #ctx_key + 0x8 + 0x20000 ~0xffffffff
-.b32 #ctx_key + 0xc + 0x20000 ~0xffffffff
-.b32 #ctx_iv + 0x0 + 0x20000 ~0xffffffff
-.b32 #ctx_iv + 0x4 + 0x20000 ~0xffffffff
-.b32 #ctx_iv + 0x8 + 0x20000 ~0xffffffff
-.b32 #ctx_iv + 0xc + 0x20000 ~0xffffffff
-.b32 #ctx_src_address_high + 0x20000 ~0xff
-.b32 #ctx_src_address_low + 0x20000 ~0xfffffff0
-.b32 #ctx_dst_address_high + 0x20000 ~0xff
-.b32 #ctx_dst_address_low + 0x20000 ~0xfffffff0
-.b32 #crypt_cmd_mode + 0x00000 ~0xf
-.b32 #crypt_cmd_length + 0x10000 ~0x0ffffff0
-.equ #engine_cmd_max 0xce
-
-.align 4
-crypt_dtable:
-.b16 #crypt_copy_prep #crypt_do_inout
-.b16 #crypt_store_prep #crypt_do_out
-.b16 #crypt_ecb_e_prep #crypt_do_inout
-.b16 #crypt_ecb_d_prep #crypt_do_inout
-.b16 #crypt_cbc_e_prep #crypt_do_inout
-.b16 #crypt_cbc_d_prep #crypt_do_inout
-.b16 #crypt_pcbc_e_prep #crypt_do_inout
-.b16 #crypt_pcbc_d_prep #crypt_do_inout
-.b16 #crypt_cfb_e_prep #crypt_do_inout
-.b16 #crypt_cfb_d_prep #crypt_do_inout
-.b16 #crypt_ofb_prep #crypt_do_inout
-.b16 #crypt_ctr_prep #crypt_do_inout
-.b16 #crypt_cbc_mac_prep #crypt_do_in
-.b16 #crypt_cmac_finish_complete_prep #crypt_do_in
-.b16 #crypt_cmac_finish_partial_prep #crypt_do_in
-
-.align 0x100
-
-.section #nv98_pcrypt_code
-
- // $r0 is always set to 0 in our code - this allows some space savings.
- clear b32 $r0
-
- // set up the interrupt handler
- mov $r1 #ih
- mov $iv0 $r1
-
- // init stack pointer
- mov $sp $r0
-
- // set interrupt dispatch - route timer, fifo, ctxswitch to i0, others to host
- movw $r1 0xfff0
- sethi $r1 0
- mov $r2 0x400
- iowr I[$r2 + 0x300] $r1
-
- // enable the interrupts
- or $r1 0xc
- iowr I[$r2] $r1
-
- // enable fifo access and context switching
- mov $r1 3
- mov $r2 0x1200
- iowr I[$r2] $r1
-
- // enable i0 delivery
- bset $flags ie0
-
- // sleep forever, waking only for interrupts.
- bset $flags $p0
- spin:
- sleep $p0
- bra #spin
-
-// i0 handler
-ih:
- // see which interrupts we got
- iord $r1 I[$r0 + 0x200]
-
- and $r2 $r1 0x8
- cmpu b32 $r2 0
- bra e #noctx
-
- // context switch... prepare the regs for xfer
- mov $r2 0x7700
- mov $xtargets $r2
- mov $xdbase $r0
- // 128-byte context.
- mov $r2 0
- sethi $r2 0x50000
-
- // read current channel
- mov $r3 0x1400
- iord $r4 I[$r3]
- // if bit 30 set, it's active, so we have to unload it first.
- shl b32 $r5 $r4 1
- cmps b32 $r5 0
- bra nc #ctxload
-
- // unload the current channel - save the context
- xdst $r0 $r2
- xdwait
- // and clear bit 30, then write back
- bclr $r4 0x1e
- iowr I[$r3] $r4
- // tell PFIFO we unloaded
- mov $r4 1
- iowr I[$r3 + 0x200] $r4
-
- bra #noctx
-
- ctxload:
- // no channel loaded - perhaps we're requested to load one
- iord $r4 I[$r3 + 0x100]
- shl b32 $r15 $r4 1
- cmps b32 $r15 0
- // if bit 30 of next channel not set, probably PFIFO is just
- // killing a context. do a faux load, without the active bit.
- bra nc #dummyload
-
- // ok, do a real context load.
- xdld $r0 $r2
- xdwait
- mov $r5 #ctx_dma
- mov $r6 #dma_count - 1
- ctxload_dma_loop:
- ld b32 $r7 D[$r5 + $r6 * 4]
- add b32 $r8 $r6 0x180
- shl b32 $r8 8
- iowr I[$r8] $r7
- sub b32 $r6 1
- bra nc #ctxload_dma_loop
-
- dummyload:
- // tell PFIFO we're done
- mov $r5 2
- iowr I[$r3 + 0x200] $r5
-
- noctx:
- and $r2 $r1 0x4
- cmpu b32 $r2 0
- bra e #nocmd
-
- // incoming fifo command.
- mov $r3 0x1900
- iord $r2 I[$r3 + 0x100]
- iord $r3 I[$r3]
- // extract the method
- and $r4 $r2 0x7ff
- // shift the addr to proper position if we need to interrupt later
- shl b32 $r2 0x10
-
- // mthd 0 and 0x100 [NAME, NOP]: ignore
- and $r5 $r4 0x7bf
- cmpu b32 $r5 0
- bra e #cmddone
-
- mov $r5 #engine_cmd_dtable - 0xc0 * 8
- mov $r6 #engine_cmd_max
- cmpu b32 $r4 0xc0
- bra nc #dtable_cmd
- mov $r5 #common_cmd_dtable - 0x80 * 8
- mov $r6 #common_cmd_max
- cmpu b32 $r4 0x80
- bra nc #dtable_cmd
- cmpu b32 $r4 0x60
- bra nc #dma_cmd
- cmpu b32 $r4 0x50
- bra ne #illegal_mthd
-
- // mthd 0x140: PM_TRIGGER
- mov $r2 0x2200
- clear b32 $r3
- sethi $r3 0x20000
- iowr I[$r2] $r3
- bra #cmddone
-
- dma_cmd:
- // mthd 0x180...: DMA_*
- cmpu b32 $r4 0x60+#dma_count
- bra nc #illegal_mthd
- shl b32 $r5 $r4 2
- add b32 $r5 (#ctx_dma - 0x60 * 4) & 0xffff
- bset $r3 0x1e
- st b32 D[$r5] $r3
- add b32 $r4 0x180 - 0x60
- shl b32 $r4 8
- iowr I[$r4] $r3
- bra #cmddone
-
- dtable_cmd:
- cmpu b32 $r4 $r6
- bra nc #illegal_mthd
- shl b32 $r4 3
- add b32 $r4 $r5
- ld b32 $r5 D[$r4 + 4]
- and $r5 $r3
- cmpu b32 $r5 0
- bra ne #invalid_bitfield
- ld b16 $r5 D[$r4]
- ld b16 $r6 D[$r4 + 2]
- cmpu b32 $r6 2
- bra e #cmd_setctx
- ld b32 $r7 D[$r0 + #ctx_cond_off]
- and $r6 $r7
- cmpu b32 $r6 1
- bra e #cmddone
- call $r5
- bra $p1 #dispatch_error
- bra #cmddone
-
- cmd_setctx:
- st b32 D[$r5] $r3
- bra #cmddone
-
-
- invalid_bitfield:
- or $r2 1
- dispatch_error:
- illegal_mthd:
- mov $r4 0x1000
- iowr I[$r4] $r2
- iowr I[$r4 + 0x100] $r3
- mov $r4 0x40
- iowr I[$r0] $r4
-
- im_loop:
- iord $r4 I[$r0 + 0x200]
- and $r4 0x40
- cmpu b32 $r4 0
- bra ne #im_loop
-
- cmddone:
- // remove the command from FIFO
- mov $r3 0x1d00
- mov $r4 1
- iowr I[$r3] $r4
-
- nocmd:
- // ack the processed interrupts
- and $r1 $r1 0xc
- iowr I[$r0 + 0x100] $r1
-iret
-
-cmd_query_get:
- // if bit 0 of param set, trigger interrupt afterwards.
- setp $p1 $r3
- or $r2 3
-
- // read PTIMER, beware of races...
- mov $r4 0xb00
- ptimer_retry:
- iord $r6 I[$r4 + 0x100]
- iord $r5 I[$r4]
- iord $r7 I[$r4 + 0x100]
- cmpu b32 $r6 $r7
- bra ne #ptimer_retry
-
- // prepare the query structure
- ld b32 $r4 D[$r0 + #ctx_query_counter]
- st b32 D[$r0 + #swap + 0x0] $r4
- st b32 D[$r0 + #swap + 0x4] $r0
- st b32 D[$r0 + #swap + 0x8] $r5
- st b32 D[$r0 + #swap + 0xc] $r6
-
- // will use target 0, DMA_QUERY.
- mov $xtargets $r0
-
- ld b32 $r4 D[$r0 + #ctx_query_address_high]
- shl b32 $r4 0x18
- mov $xdbase $r4
-
- ld b32 $r4 D[$r0 + #ctx_query_address_low]
- mov $r5 #swap
- sethi $r5 0x20000
- xdst $r4 $r5
- xdwait
-
- ret
-
-cmd_cond_mode:
- // if >= 5, INVALID_ENUM
- bset $flags $p1
- or $r2 2
- cmpu b32 $r3 5
- bra nc #return
-
- // otherwise, no error.
- bclr $flags $p1
-
- // if < 2, no QUERY object is involved
- cmpu b32 $r3 2
- bra nc #cmd_cond_mode_queryful
-
- xor $r3 1
- st b32 D[$r0 + #ctx_cond_off] $r3
- return:
- ret
-
- cmd_cond_mode_queryful:
- // ok, will need to pull a QUERY object, prepare offsets
- ld b32 $r4 D[$r0 + #ctx_cond_address_high]
- ld b32 $r5 D[$r0 + #ctx_cond_address_low]
- and $r6 $r5 0xff
- shr b32 $r5 8
- shl b32 $r4 0x18
- or $r4 $r5
- mov $xdbase $r4
- mov $xtargets $r0
-
- // pull the first one
- mov $r5 #swap
- sethi $r5 0x20000
- xdld $r6 $r5
-
- // if == 2, only a single QUERY is involved...
- cmpu b32 $r3 2
- bra ne #cmd_cond_mode_double
-
- xdwait
- ld b32 $r4 D[$r0 + #swap + 4]
- cmpu b32 $r4 0
- xbit $r4 $flags z
- st b32 D[$r0 + #ctx_cond_off] $r4
- ret
-
- // ok, we'll need to pull second one too
- cmd_cond_mode_double:
- add b32 $r6 0x10
- add b32 $r5 0x10
- xdld $r6 $r5
- xdwait
-
- // compare COUNTERs
- ld b32 $r5 D[$r0 + #swap + 0x00]
- ld b32 $r6 D[$r0 + #swap + 0x10]
- cmpu b32 $r5 $r6
- xbit $r4 $flags z
-
- // compare RESen
- ld b32 $r5 D[$r0 + #swap + 0x04]
- ld b32 $r6 D[$r0 + #swap + 0x14]
- cmpu b32 $r5 $r6
- xbit $r5 $flags z
- and $r4 $r5
-
- // and negate or not, depending on mode
- cmpu b32 $r3 3
- xbit $r5 $flags z
- xor $r4 $r5
- st b32 D[$r0 + #ctx_cond_off] $r4
- ret
-
-cmd_wrcache_flush:
- bclr $flags $p1
- mov $r2 0x2200
- clear b32 $r3
- sethi $r3 0x10000
- iowr I[$r2] $r3
- ret
-
-crypt_cmd_mode:
- // if >= 0xf, INVALID_ENUM
- bset $flags $p1
- or $r2 2
- cmpu b32 $r3 0xf
- bra nc #crypt_cmd_mode_return
-
- bclr $flags $p1
- st b32 D[$r0 + #ctx_mode] $r3
-
- crypt_cmd_mode_return:
- ret
-
-crypt_cmd_length:
- // nop if length == 0
- cmpu b32 $r3 0
- bra e #crypt_cmd_mode_return
-
- // init key, IV
- cxset 3
- mov $r4 #ctx_key
- sethi $r4 0x70000
- xdst $r0 $r4
- mov $r4 #ctx_iv
- sethi $r4 0x60000
- xdst $r0 $r4
- xdwait
- ckeyreg $c7
-
- // prepare the targets
- mov $r4 0x2100
- mov $xtargets $r4
-
- // prepare src address
- ld b32 $r4 D[$r0 + #ctx_src_address_high]
- ld b32 $r5 D[$r0 + #ctx_src_address_low]
- shr b32 $r8 $r5 8
- shl b32 $r4 0x18
- or $r4 $r8
- and $r5 $r5 0xff
-
- // prepare dst address
- ld b32 $r6 D[$r0 + #ctx_dst_address_high]
- ld b32 $r7 D[$r0 + #ctx_dst_address_low]
- shr b32 $r8 $r7 8
- shl b32 $r6 0x18
- or $r6 $r8
- and $r7 $r7 0xff
-
- // find the proper prep & do functions
- ld b32 $r8 D[$r0 + #ctx_mode]
- shl b32 $r8 2
-
- // run prep
- ld b16 $r9 D[$r8 + #crypt_dtable]
- call $r9
-
- // do it
- ld b16 $r9 D[$r8 + #crypt_dtable + 2]
- call $r9
- cxset 1
- xdwait
- cxset 0x61
- xdwait
- xdwait
-
- // update src address
- shr b32 $r8 $r4 0x18
- shl b32 $r9 $r4 8
- add b32 $r9 $r5
- adc b32 $r8 0
- st b32 D[$r0 + #ctx_src_address_high] $r8
- st b32 D[$r0 + #ctx_src_address_low] $r9
-
- // update dst address
- shr b32 $r8 $r6 0x18
- shl b32 $r9 $r6 8
- add b32 $r9 $r7
- adc b32 $r8 0
- st b32 D[$r0 + #ctx_dst_address_high] $r8
- st b32 D[$r0 + #ctx_dst_address_low] $r9
-
- // pull updated IV
- cxset 2
- mov $r4 #ctx_iv
- sethi $r4 0x60000
- xdld $r0 $r4
- xdwait
-
- ret
-
-
-crypt_copy_prep:
- cs0begin 2
- cxsin $c0
- cxsout $c0
- ret
-
-crypt_store_prep:
- cs0begin 1
- cxsout $c6
- ret
-
-crypt_ecb_e_prep:
- cs0begin 3
- cxsin $c0
- cenc $c0 $c0
- cxsout $c0
- ret
-
-crypt_ecb_d_prep:
- ckexp $c7 $c7
- cs0begin 3
- cxsin $c0
- cdec $c0 $c0
- cxsout $c0
- ret
-
-crypt_cbc_e_prep:
- cs0begin 4
- cxsin $c0
- cxor $c6 $c0
- cenc $c6 $c6
- cxsout $c6
- ret
-
-crypt_cbc_d_prep:
- ckexp $c7 $c7
- cs0begin 5
- cmov $c2 $c6
- cxsin $c6
- cdec $c0 $c6
- cxor $c0 $c2
- cxsout $c0
- ret
-
-crypt_pcbc_e_prep:
- cs0begin 5
- cxsin $c0
- cxor $c6 $c0
- cenc $c6 $c6
- cxsout $c6
- cxor $c6 $c0
- ret
-
-crypt_pcbc_d_prep:
- ckexp $c7 $c7
- cs0begin 5
- cxsin $c0
- cdec $c1 $c0
- cxor $c6 $c1
- cxsout $c6
- cxor $c6 $c0
- ret
-
-crypt_cfb_e_prep:
- cs0begin 4
- cenc $c6 $c6
- cxsin $c0
- cxor $c6 $c0
- cxsout $c6
- ret
-
-crypt_cfb_d_prep:
- cs0begin 4
- cenc $c0 $c6
- cxsin $c6
- cxor $c0 $c6
- cxsout $c0
- ret
-
-crypt_ofb_prep:
- cs0begin 4
- cenc $c6 $c6
- cxsin $c0
- cxor $c0 $c6
- cxsout $c0
- ret
-
-crypt_ctr_prep:
- cs0begin 5
- cenc $c1 $c6
- cadd $c6 1
- cxsin $c0
- cxor $c0 $c1
- cxsout $c0
- ret
-
-crypt_cbc_mac_prep:
- cs0begin 3
- cxsin $c0
- cxor $c6 $c0
- cenc $c6 $c6
- ret
-
-crypt_cmac_finish_complete_prep:
- cs0begin 7
- cxsin $c0
- cxor $c6 $c0
- cxor $c0 $c0
- cenc $c0 $c0
- cprecmac $c0 $c0
- cxor $c6 $c0
- cenc $c6 $c6
- ret
-
-crypt_cmac_finish_partial_prep:
- cs0begin 8
- cxsin $c0
- cxor $c6 $c0
- cxor $c0 $c0
- cenc $c0 $c0
- cprecmac $c0 $c0
- cprecmac $c0 $c0
- cxor $c6 $c0
- cenc $c6 $c6
- ret
-
-// TODO
-crypt_do_in:
- add b32 $r3 $r5
- mov $xdbase $r4
- mov $r9 #swap
- sethi $r9 0x20000
- crypt_do_in_loop:
- xdld $r5 $r9
- xdwait
- cxset 0x22
- xdst $r0 $r9
- cs0exec 1
- xdwait
- add b32 $r5 0x10
- cmpu b32 $r5 $r3
- bra ne #crypt_do_in_loop
- cxset 1
- xdwait
- ret
-
-crypt_do_out:
- add b32 $r3 $r7
- mov $xdbase $r6
- mov $r9 #swap
- sethi $r9 0x20000
- crypt_do_out_loop:
- cs0exec 1
- cxset 0x61
- xdld $r7 $r9
- xdst $r7 $r9
- cxset 1
- xdwait
- add b32 $r7 0x10
- cmpu b32 $r7 $r3
- bra ne #crypt_do_out_loop
- ret
-
-crypt_do_inout:
- add b32 $r3 $r5
- mov $r9 #swap
- sethi $r9 0x20000
- crypt_do_inout_loop:
- mov $xdbase $r4
- xdld $r5 $r9
- xdwait
- cxset 0x21
- xdst $r0 $r9
- cs0exec 1
- cxset 0x61
- mov $xdbase $r6
- xdld $r7 $r9
- xdst $r7 $r9
- cxset 1
- xdwait
- add b32 $r5 0x10
- add b32 $r7 0x10
- cmpu b32 $r5 $r3
- bra ne #crypt_do_inout_loop
- ret
-
-.align 0x100
+++ /dev/null
-uint32_t nv98_pcrypt_data[] = {
-/* 0x0000: ctx_dma */
-/* 0x0000: ctx_dma_query */
- 0x00000000,
-/* 0x0004: ctx_dma_src */
- 0x00000000,
-/* 0x0008: ctx_dma_dst */
- 0x00000000,
-/* 0x000c: ctx_query_address_high */
- 0x00000000,
-/* 0x0010: ctx_query_address_low */
- 0x00000000,
-/* 0x0014: ctx_query_counter */
- 0x00000000,
-/* 0x0018: ctx_cond_address_high */
- 0x00000000,
-/* 0x001c: ctx_cond_address_low */
- 0x00000000,
-/* 0x0020: ctx_cond_off */
- 0x00000000,
-/* 0x0024: ctx_src_address_high */
- 0x00000000,
-/* 0x0028: ctx_src_address_low */
- 0x00000000,
-/* 0x002c: ctx_dst_address_high */
- 0x00000000,
-/* 0x0030: ctx_dst_address_low */
- 0x00000000,
-/* 0x0034: ctx_mode */
- 0x00000000,
- 0x00000000,
- 0x00000000,
-/* 0x0040: ctx_key */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-/* 0x0050: ctx_iv */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-/* 0x0080: swap */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-/* 0x00a0: common_cmd_dtable */
- 0x0002000c,
- 0xffffff00,
- 0x00020010,
- 0x0000000f,
- 0x00020014,
- 0x00000000,
- 0x00000192,
- 0xfffffffe,
- 0x00020018,
- 0xffffff00,
- 0x0002001c,
- 0x0000000f,
- 0x000001d7,
- 0xfffffff8,
- 0x00000260,
- 0xffffffff,
-/* 0x00e0: engine_cmd_dtable */
- 0x00020040,
- 0x00000000,
- 0x00020044,
- 0x00000000,
- 0x00020048,
- 0x00000000,
- 0x0002004c,
- 0x00000000,
- 0x00020050,
- 0x00000000,
- 0x00020054,
- 0x00000000,
- 0x00020058,
- 0x00000000,
- 0x0002005c,
- 0x00000000,
- 0x00020024,
- 0xffffff00,
- 0x00020028,
- 0x0000000f,
- 0x0002002c,
- 0xffffff00,
- 0x00020030,
- 0x0000000f,
- 0x00000271,
- 0xfffffff0,
- 0x00010285,
- 0xf000000f,
-/* 0x0150: crypt_dtable */
- 0x04db0321,
- 0x04b1032f,
- 0x04db0339,
- 0x04db034b,
- 0x04db0361,
- 0x04db0377,
- 0x04db0395,
- 0x04db03af,
- 0x04db03cd,
- 0x04db03e3,
- 0x04db03f9,
- 0x04db040f,
- 0x04830429,
- 0x0483043b,
- 0x0483045d,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
-
-uint32_t nv98_pcrypt_code[] = {
- 0x17f004bd,
- 0x0010fe35,
- 0xf10004fe,
- 0xf0fff017,
- 0x27f10013,
- 0x21d00400,
- 0x0c15f0c0,
- 0xf00021d0,
- 0x27f10317,
- 0x21d01200,
- 0x1031f400,
-/* 0x002f: spin */
- 0xf40031f4,
- 0x0ef40028,
-/* 0x0035: ih */
- 0x8001cffd,
- 0xb00812c4,
- 0x0bf40024,
- 0x0027f167,
- 0x002bfe77,
- 0xf00007fe,
- 0x23f00027,
- 0x0037f105,
- 0x0034cf14,
- 0xb0014594,
- 0x18f40055,
- 0x0602fa17,
- 0x4af003f8,
- 0x0034d01e,
- 0xd00147f0,
- 0x0ef48034,
-/* 0x0075: ctxload */
- 0x4034cf33,
- 0xb0014f94,
- 0x18f400f5,
- 0x0502fa21,
- 0x57f003f8,
- 0x0267f000,
-/* 0x008c: ctxload_dma_loop */
- 0xa07856bc,
- 0xb6018068,
- 0x87d00884,
- 0x0162b600,
-/* 0x009f: dummyload */
- 0xf0f018f4,
- 0x35d00257,
-/* 0x00a5: noctx */
- 0x0412c480,
- 0xf50024b0,
- 0xf100df0b,
- 0xcf190037,
- 0x33cf4032,
- 0xff24e400,
- 0x1024b607,
- 0x07bf45e4,
- 0xf50054b0,
- 0xf100b90b,
- 0xf1fae057,
- 0xb000ce67,
- 0x18f4c044,
- 0xa057f14d,
- 0x8867f1fc,
- 0x8044b000,
- 0xb03f18f4,
- 0x18f46044,
- 0x5044b019,
- 0xf1741bf4,
- 0xbd220027,
- 0x0233f034,
- 0xf50023d0,
-/* 0x0103: dma_cmd */
- 0xb000810e,
- 0x18f46344,
- 0x0245945e,
- 0xfe8050b7,
- 0x801e39f0,
- 0x40b70053,
- 0x44b60120,
- 0x0043d008,
-/* 0x0123: dtable_cmd */
- 0xb8600ef4,
- 0x18f40446,
- 0x0344b63e,
- 0x980045bb,
- 0x53fd0145,
- 0x0054b004,
- 0x58291bf4,
- 0x46580045,
- 0x0264b001,
- 0x98170bf4,
- 0x67fd0807,
- 0x0164b004,
- 0xf9300bf4,
- 0x0f01f455,
-/* 0x015b: cmd_setctx */
- 0x80280ef4,
- 0x0ef40053,
-/* 0x0161: invalid_bitfield */
- 0x0125f022,
-/* 0x0164: dispatch_error */
-/* 0x0164: illegal_mthd */
- 0x100047f1,
- 0xd00042d0,
- 0x47f04043,
- 0x0004d040,
-/* 0x0174: im_loop */
- 0xf08004cf,
- 0x44b04044,
- 0xf71bf400,
-/* 0x0180: cmddone */
- 0x1d0037f1,
- 0xd00147f0,
-/* 0x018a: nocmd */
- 0x11c40034,
- 0x4001d00c,
-/* 0x0192: cmd_query_get */
- 0x38f201f8,
- 0x0325f001,
- 0x0b0047f1,
-/* 0x019c: ptimer_retry */
- 0xcf4046cf,
- 0x47cf0045,
- 0x0467b840,
- 0x98f41bf4,
- 0x04800504,
- 0x21008020,
- 0x80220580,
- 0x0bfe2306,
- 0x03049800,
- 0xfe1844b6,
- 0x04980047,
- 0x8057f104,
- 0x0253f000,
- 0xf80645fa,
-/* 0x01d7: cmd_cond_mode */
- 0xf400f803,
- 0x25f00131,
- 0x0534b002,
- 0xf41218f4,
- 0x34b00132,
- 0x0b18f402,
- 0x800136f0,
-/* 0x01f2: return */
- 0x00f80803,
-/* 0x01f4: cmd_cond_mode_queryful */
- 0x98060498,
- 0x56c40705,
- 0x0855b6ff,
- 0xfd1844b6,
- 0x47fe0545,
- 0x000bfe00,
- 0x008057f1,
- 0xfa0253f0,
- 0x34b00565,
- 0x131bf402,
- 0x049803f8,
- 0x0044b021,
- 0x800b4cf0,
- 0x00f80804,
-/* 0x022c: cmd_cond_mode_double */
- 0xb61060b6,
- 0x65fa1050,
- 0x9803f805,
- 0x06982005,
- 0x0456b824,
- 0x980b4cf0,
- 0x06982105,
- 0x0456b825,
- 0xfd0b5cf0,
- 0x34b00445,
- 0x0b5cf003,
- 0x800645fd,
- 0x00f80804,
-/* 0x0260: cmd_wrcache_flush */
- 0xf10132f4,
- 0xbd220027,
- 0x0133f034,
- 0xf80023d0,
-/* 0x0271: crypt_cmd_mode */
- 0x0131f400,
- 0xb00225f0,
- 0x18f40f34,
- 0x0132f409,
-/* 0x0283: crypt_cmd_mode_return */
- 0xf80d0380,
-/* 0x0285: crypt_cmd_length */
- 0x0034b000,
- 0xf4fb0bf4,
- 0x47f0033c,
- 0x0743f040,
- 0xf00604fa,
- 0x43f05047,
- 0x0604fa06,
- 0x3cf503f8,
- 0x47f1c407,
- 0x4bfe2100,
- 0x09049800,
- 0x950a0598,
- 0x44b60858,
- 0x0548fd18,
- 0x98ff55c4,
- 0x07980b06,
- 0x0878950c,
- 0xfd1864b6,
- 0x77c40568,
- 0x0d0898ff,
- 0x580284b6,
- 0x95f9a889,
- 0xf9a98958,
- 0x013cf495,
- 0x3cf403f8,
- 0xf803f861,
- 0x18489503,
- 0xbb084994,
- 0x81b60095,
- 0x09088000,
- 0x950a0980,
- 0x69941868,
- 0x0097bb08,
- 0x800081b6,
- 0x09800b08,
- 0x023cf40c,
- 0xf05047f0,
- 0x04fa0643,
- 0xf803f805,
-/* 0x0321: crypt_copy_prep */
- 0x203cf500,
- 0x003cf594,
- 0x003cf588,
-/* 0x032f: crypt_store_prep */
- 0xf500f88c,
- 0xf594103c,
- 0xf88c063c,
-/* 0x0339: crypt_ecb_e_prep */
- 0x303cf500,
- 0x003cf594,
- 0x003cf588,
- 0x003cf5d0,
-/* 0x034b: crypt_ecb_d_prep */
- 0xf500f88c,
- 0xf5c8773c,
- 0xf594303c,
- 0xf588003c,
- 0xf5d4003c,
- 0xf88c003c,
-/* 0x0361: crypt_cbc_e_prep */
- 0x403cf500,
- 0x003cf594,
- 0x063cf588,
- 0x663cf5ac,
- 0x063cf5d0,
-/* 0x0377: crypt_cbc_d_prep */
- 0xf500f88c,
- 0xf5c8773c,
- 0xf594503c,
- 0xf584623c,
- 0xf588063c,
- 0xf5d4603c,
- 0xf5ac203c,
- 0xf88c003c,
-/* 0x0395: crypt_pcbc_e_prep */
- 0x503cf500,
- 0x003cf594,
- 0x063cf588,
- 0x663cf5ac,
- 0x063cf5d0,
- 0x063cf58c,
-/* 0x03af: crypt_pcbc_d_prep */
- 0xf500f8ac,
- 0xf5c8773c,
- 0xf594503c,
- 0xf588003c,
- 0xf5d4013c,
- 0xf5ac163c,
- 0xf58c063c,
- 0xf8ac063c,
-/* 0x03cd: crypt_cfb_e_prep */
- 0x403cf500,
- 0x663cf594,
- 0x003cf5d0,
- 0x063cf588,
- 0x063cf5ac,
-/* 0x03e3: crypt_cfb_d_prep */
- 0xf500f88c,
- 0xf594403c,
- 0xf5d0603c,
- 0xf588063c,
- 0xf5ac603c,
- 0xf88c003c,
-/* 0x03f9: crypt_ofb_prep */
- 0x403cf500,
- 0x663cf594,
- 0x003cf5d0,
- 0x603cf588,
- 0x003cf5ac,
-/* 0x040f: crypt_ctr_prep */
- 0xf500f88c,
- 0xf594503c,
- 0xf5d0613c,
- 0xf5b0163c,
- 0xf588003c,
- 0xf5ac103c,
- 0xf88c003c,
-/* 0x0429: crypt_cbc_mac_prep */
- 0x303cf500,
- 0x003cf594,
- 0x063cf588,
- 0x663cf5ac,
-/* 0x043b: crypt_cmac_finish_complete_prep */
- 0xf500f8d0,
- 0xf594703c,
- 0xf588003c,
- 0xf5ac063c,
- 0xf5ac003c,
- 0xf5d0003c,
- 0xf5bc003c,
- 0xf5ac063c,
- 0xf8d0663c,
-/* 0x045d: crypt_cmac_finish_partial_prep */
- 0x803cf500,
- 0x003cf594,
- 0x063cf588,
- 0x003cf5ac,
- 0x003cf5ac,
- 0x003cf5d0,
- 0x003cf5bc,
- 0x063cf5bc,
- 0x663cf5ac,
-/* 0x0483: crypt_do_in */
- 0xbb00f8d0,
- 0x47fe0035,
- 0x8097f100,
- 0x0293f000,
-/* 0x0490: crypt_do_in_loop */
- 0xf80559fa,
- 0x223cf403,
- 0xf50609fa,
- 0xf898103c,
- 0x1050b603,
- 0xf40453b8,
- 0x3cf4e91b,
- 0xf803f801,
-/* 0x04b1: crypt_do_out */
- 0x0037bb00,
- 0xf10067fe,
- 0xf0008097,
-/* 0x04be: crypt_do_out_loop */
- 0x3cf50293,
- 0x3cf49810,
- 0x0579fa61,
- 0xf40679fa,
- 0x03f8013c,
- 0xb81070b6,
- 0x1bf40473,
-/* 0x04db: crypt_do_inout */
- 0xbb00f8e8,
- 0x97f10035,
- 0x93f00080,
-/* 0x04e5: crypt_do_inout_loop */
- 0x0047fe02,
- 0xf80559fa,
- 0x213cf403,
- 0xf50609fa,
- 0xf498103c,
- 0x67fe613c,
- 0x0579fa00,
- 0xf40679fa,
- 0x03f8013c,
- 0xb61050b6,
- 0x53b81070,
- 0xd41bf404,
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-
-struct nv98_ppp_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nv98_ppp_fini(struct drm_device *dev, int engine, bool suspend)
-{
- if (!(nv_rd32(dev, 0x000200) & 0x00000002))
- return 0;
-
- nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
- return 0;
-}
-
-static int
-nv98_ppp_init(struct drm_device *dev, int engine)
-{
- nv_mask(dev, 0x000200, 0x00000002, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000002, 0x00000002);
- return 0;
-}
-
-static void
-nv98_ppp_destroy(struct drm_device *dev, int engine)
-{
- struct nv98_ppp_engine *pppp = nv_engine(dev, engine);
-
- NVOBJ_ENGINE_DEL(dev, PPP);
-
- kfree(pppp);
-}
-
-int
-nv98_ppp_create(struct drm_device *dev)
-{
- struct nv98_ppp_engine *pppp;
-
- pppp = kzalloc(sizeof(*pppp), GFP_KERNEL);
- if (!pppp)
- return -ENOMEM;
-
- pppp->base.destroy = nv98_ppp_destroy;
- pppp->base.init = nv98_ppp_init;
- pppp->base.fini = nv98_ppp_fini;
-
- NVOBJ_ENGINE_ADD(dev, PPP, &pppp->base);
- return 0;
-}
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-#include "nva3_copy.fuc.h"
-
-struct nva3_copy_engine {
- struct nouveau_exec_engine base;
-};
-
-static int
-nva3_copy_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramin = chan->ramin;
- struct nouveau_gpuobj *ctx = NULL;
- int ret;
-
- NV_DEBUG(dev, "ch%d\n", chan->id);
-
- ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
- NVOBJ_FLAG_ZERO_FREE, &ctx);
- if (ret)
- return ret;
-
- nv_wo32(ramin, 0xc0, 0x00190000);
- nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
- nv_wo32(ramin, 0xc8, ctx->vinst);
- nv_wo32(ramin, 0xcc, 0x00000000);
- nv_wo32(ramin, 0xd0, 0x00000000);
- nv_wo32(ramin, 0xd4, 0x00000000);
- dev_priv->engine.instmem.flush(dev);
-
- atomic_inc(&chan->vm->engref[engine]);
- chan->engctx[engine] = ctx;
- return 0;
-}
-
-static int
-nva3_copy_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
-
- /* fuc engine doesn't need an object, our ramht code does.. */
- ctx->engine = 3;
- ctx->class = class;
- return nouveau_ramht_insert(chan, handle, ctx);
-}
-
-static void
-nva3_copy_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- int i;
-
- for (i = 0xc0; i <= 0xd4; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
-
- atomic_dec(&chan->vm->engref[engine]);
- nouveau_gpuobj_ref(NULL, &ctx);
- chan->engctx[engine] = ctx;
-}
-
-static void
-nva3_copy_tlb_flush(struct drm_device *dev, int engine)
-{
- nv50_vm_flush_engine(dev, 0x0d);
-}
-
-static int
-nva3_copy_init(struct drm_device *dev, int engine)
-{
- int i;
-
- nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
- nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
- nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */
-
- /* upload ucode */
- nv_wr32(dev, 0x1041c0, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
- nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);
-
- nv_wr32(dev, 0x104180, 0x01000000);
- for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, 0x104188, i >> 6);
- nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
- }
-
- /* start it running */
- nv_wr32(dev, 0x10410c, 0x00000000);
- nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
- nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
-{
- nv_mask(dev, 0x104048, 0x00000003, 0x00000000);
- nv_wr32(dev, 0x104014, 0xffffffff);
- return 0;
-}
-
-static struct nouveau_enum nva3_copy_isr_error_name[] = {
- { 0x0001, "ILLEGAL_MTHD" },
- { 0x0002, "INVALID_ENUM" },
- { 0x0003, "INVALID_BITFIELD" },
- {}
-};
-
-static void
-nva3_copy_isr(struct drm_device *dev)
-{
- u32 dispatch = nv_rd32(dev, 0x10401c);
- u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
- u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
- u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
- u32 addr = nv_rd32(dev, 0x104040) >> 16;
- u32 mthd = (addr & 0x07ff) << 2;
- u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(dev, 0x104044);
- int chid = nv50_graph_isr_chid(dev, inst);
-
- if (stat & 0x00000040) {
- NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
- nouveau_enum_print(nva3_copy_isr_error_name, ssta);
- printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, mthd, data);
- nv_wr32(dev, 0x104004, 0x00000040);
- stat &= ~0x00000040;
- }
-
- if (stat) {
- NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
- nv_wr32(dev, 0x104004, stat);
- }
- nv50_fb_vm_trap(dev, 1);
-}
-
-static void
-nva3_copy_destroy(struct drm_device *dev, int engine)
-{
- struct nva3_copy_engine *pcopy = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, 22);
-
- NVOBJ_ENGINE_DEL(dev, COPY0);
- kfree(pcopy);
-}
-
-int
-nva3_copy_create(struct drm_device *dev)
-{
- struct nva3_copy_engine *pcopy;
-
- pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
- if (!pcopy)
- return -ENOMEM;
-
- pcopy->base.destroy = nva3_copy_destroy;
- pcopy->base.init = nva3_copy_init;
- pcopy->base.fini = nva3_copy_fini;
- pcopy->base.context_new = nva3_copy_context_new;
- pcopy->base.context_del = nva3_copy_context_del;
- pcopy->base.object_new = nva3_copy_object_new;
- pcopy->base.tlb_flush = nva3_copy_tlb_flush;
-
- nouveau_irq_register(dev, 22, nva3_copy_isr);
-
- NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
- NVOBJ_CLASS(dev, 0x85b5, COPY0);
- return 0;
-}
+++ /dev/null
-/* fuc microcode for copy engine on nva3- chipsets
- *
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-/* To build for nva3:nvc0
- * m4 -DNVA3 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nva3_copy.fuc.h
- *
- * To build for nvc0-
- * m4 -DNVC0 nva3_copy.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_copy.fuc.h
- */
-
-ifdef(`NVA3',
-.section #nva3_pcopy_data
-,
-.section #nvc0_pcopy_data
-)
-
-ctx_object: .b32 0
-ifdef(`NVA3',
-ctx_dma:
-ctx_dma_query: .b32 0
-ctx_dma_src: .b32 0
-ctx_dma_dst: .b32 0
-,)
-.equ #ctx_dma_count 3
-ctx_query_address_high: .b32 0
-ctx_query_address_low: .b32 0
-ctx_query_counter: .b32 0
-ctx_src_address_high: .b32 0
-ctx_src_address_low: .b32 0
-ctx_src_pitch: .b32 0
-ctx_src_tile_mode: .b32 0
-ctx_src_xsize: .b32 0
-ctx_src_ysize: .b32 0
-ctx_src_zsize: .b32 0
-ctx_src_zoff: .b32 0
-ctx_src_xoff: .b32 0
-ctx_src_yoff: .b32 0
-ctx_src_cpp: .b32 0
-ctx_dst_address_high: .b32 0
-ctx_dst_address_low: .b32 0
-ctx_dst_pitch: .b32 0
-ctx_dst_tile_mode: .b32 0
-ctx_dst_xsize: .b32 0
-ctx_dst_ysize: .b32 0
-ctx_dst_zsize: .b32 0
-ctx_dst_zoff: .b32 0
-ctx_dst_xoff: .b32 0
-ctx_dst_yoff: .b32 0
-ctx_dst_cpp: .b32 0
-ctx_format: .b32 0
-ctx_swz_const0: .b32 0
-ctx_swz_const1: .b32 0
-ctx_xcnt: .b32 0
-ctx_ycnt: .b32 0
-.align 256
-
-dispatch_table:
-// mthd 0x0000, NAME
-.b16 0x000 1
-.b32 #ctx_object ~0xffffffff
-// mthd 0x0100, NOP
-.b16 0x040 1
-.b32 0x00010000 + #cmd_nop ~0xffffffff
-// mthd 0x0140, PM_TRIGGER
-.b16 0x050 1
-.b32 0x00010000 + #cmd_pm_trigger ~0xffffffff
-ifdef(`NVA3', `
-// mthd 0x0180-0x018c, DMA_
-.b16 0x060 #ctx_dma_count
-dispatch_dma:
-.b32 0x00010000 + #cmd_dma ~0xffffffff
-.b32 0x00010000 + #cmd_dma ~0xffffffff
-.b32 0x00010000 + #cmd_dma ~0xffffffff
-',)
-// mthd 0x0200-0x0218, SRC_TILE
-.b16 0x80 7
-.b32 #ctx_src_tile_mode ~0x00000fff
-.b32 #ctx_src_xsize ~0x0007ffff
-.b32 #ctx_src_ysize ~0x00001fff
-.b32 #ctx_src_zsize ~0x000007ff
-.b32 #ctx_src_zoff ~0x00000fff
-.b32 #ctx_src_xoff ~0x0007ffff
-.b32 #ctx_src_yoff ~0x00001fff
-// mthd 0x0220-0x0238, DST_TILE
-.b16 0x88 7
-.b32 #ctx_dst_tile_mode ~0x00000fff
-.b32 #ctx_dst_xsize ~0x0007ffff
-.b32 #ctx_dst_ysize ~0x00001fff
-.b32 #ctx_dst_zsize ~0x000007ff
-.b32 #ctx_dst_zoff ~0x00000fff
-.b32 #ctx_dst_xoff ~0x0007ffff
-.b32 #ctx_dst_yoff ~0x00001fff
-// mthd 0x0300-0x0304, EXEC, WRCACHE_FLUSH
-.b16 0xc0 2
-.b32 0x00010000 + #cmd_exec ~0xffffffff
-.b32 0x00010000 + #cmd_wrcache_flush ~0xffffffff
-// mthd 0x030c-0x0340, various stuff
-.b16 0xc3 14
-.b32 #ctx_src_address_high ~0x000000ff
-.b32 #ctx_src_address_low ~0xffffffff
-.b32 #ctx_dst_address_high ~0x000000ff
-.b32 #ctx_dst_address_low ~0xffffffff
-.b32 #ctx_src_pitch ~0x0007ffff
-.b32 #ctx_dst_pitch ~0x0007ffff
-.b32 #ctx_xcnt ~0x0000ffff
-.b32 #ctx_ycnt ~0x00001fff
-.b32 #ctx_format ~0x0333ffff
-.b32 #ctx_swz_const0 ~0xffffffff
-.b32 #ctx_swz_const1 ~0xffffffff
-.b32 #ctx_query_address_high ~0x000000ff
-.b32 #ctx_query_address_low ~0xffffffff
-.b32 #ctx_query_counter ~0xffffffff
-.b16 0x800 0
-
-ifdef(`NVA3',
-.section #nva3_pcopy_code
-,
-.section #nvc0_pcopy_code
-)
-
-main:
- clear b32 $r0
- mov $sp $r0
-
- // setup i0 handler and route fifo and ctxswitch to it
- mov $r1 #ih
- mov $iv0 $r1
- mov $r1 0x400
- movw $r2 0xfff3
- sethi $r2 0
- iowr I[$r1 + 0x300] $r2
-
- // enable interrupts
- or $r2 0xc
- iowr I[$r1] $r2
- bset $flags ie0
-
- // enable fifo access and context switching
- mov $r1 0x1200
- mov $r2 3
- iowr I[$r1] $r2
-
- // sleep forever, waking for interrupts
- bset $flags $p0
- spin:
- sleep $p0
- bra #spin
-
-// i0 handler
-ih:
- iord $r1 I[$r0 + 0x200]
-
- and $r2 $r1 0x00000008
- bra e #ih_no_chsw
- call #chsw
- ih_no_chsw:
- and $r2 $r1 0x00000004
- bra e #ih_no_cmd
- call #dispatch
-
- ih_no_cmd:
- and $r1 $r1 0x0000000c
- iowr I[$r0 + 0x100] $r1
- iret
-
-// $p1 direction (0 = unload, 1 = load)
-// $r3 channel
-swctx:
- mov $r4 0x7700
- mov $xtargets $r4
-ifdef(`NVA3', `
- // target 7 hardcoded to ctx dma object
- mov $xdbase $r0
-', ` // NVC0
- // read SCRATCH3 to decide if we are PCOPY0 or PCOPY1
- mov $r4 0x2100
- iord $r4 I[$r4 + 0]
- and $r4 1
- shl b32 $r4 4
- add b32 $r4 0x30
-
- // channel is in vram
- mov $r15 0x61c
- shl b32 $r15 6
- mov $r5 0x114
- iowrs I[$r15] $r5
-
- // read 16-byte PCOPYn info, containing context pointer, from channel
- shl b32 $r5 $r3 4
- add b32 $r5 2
- mov $xdbase $r5
- mov $r5 $sp
- // get a chunk of stack space, aligned to 256 byte boundary
- sub b32 $r5 0x100
- mov $r6 0xff
- not b32 $r6
- and $r5 $r6
- sethi $r5 0x00020000
- xdld $r4 $r5
- xdwait
- sethi $r5 0
-
- // set context pointer, from within channel VM
- mov $r14 0
- iowrs I[$r15] $r14
- ld b32 $r4 D[$r5 + 0]
- shr b32 $r4 8
- ld b32 $r6 D[$r5 + 4]
- shl b32 $r6 24
- or $r4 $r6
- mov $xdbase $r4
-')
- // 256-byte context, at start of data segment
- mov b32 $r4 $r0
- sethi $r4 0x60000
-
- // swap!
- bra $p1 #swctx_load
- xdst $r0 $r4
- bra #swctx_done
- swctx_load:
- xdld $r0 $r4
- swctx_done:
- xdwait
- ret
-
-chsw:
- // read current channel
- mov $r2 0x1400
- iord $r3 I[$r2]
-
- // if it's active, unload it and return
- xbit $r15 $r3 0x1e
- bra e #chsw_no_unload
- bclr $flags $p1
- call #swctx
- bclr $r3 0x1e
- iowr I[$r2] $r3
- mov $r4 1
- iowr I[$r2 + 0x200] $r4
- ret
-
- // read next channel
- chsw_no_unload:
- iord $r3 I[$r2 + 0x100]
-
- // is there a channel waiting to be loaded?
- xbit $r13 $r3 0x1e
- bra e #chsw_finish_load
- bset $flags $p1
- call #swctx
-ifdef(`NVA3',
- // load dma objects back into TARGET regs
- mov $r5 #ctx_dma
- mov $r6 #ctx_dma_count
- chsw_load_ctx_dma:
- ld b32 $r7 D[$r5 + $r6 * 4]
- add b32 $r8 $r6 0x180
- shl b32 $r8 8
- iowr I[$r8] $r7
- sub b32 $r6 1
- bra nc #chsw_load_ctx_dma
-,)
-
- chsw_finish_load:
- mov $r3 2
- iowr I[$r2 + 0x200] $r3
- ret
-
-dispatch:
- // read incoming fifo command
- mov $r3 0x1900
- iord $r2 I[$r3 + 0x100]
- iord $r3 I[$r3 + 0x000]
- and $r4 $r2 0x7ff
- // $r2 will be used to store exception data
- shl b32 $r2 0x10
-
- // lookup method in the dispatch table, ILLEGAL_MTHD if not found
- mov $r5 #dispatch_table
- clear b32 $r6
- clear b32 $r7
- dispatch_loop:
- ld b16 $r6 D[$r5 + 0]
- ld b16 $r7 D[$r5 + 2]
- add b32 $r5 4
- cmpu b32 $r4 $r6
- bra c #dispatch_illegal_mthd
- add b32 $r7 $r6
- cmpu b32 $r4 $r7
- bra c #dispatch_valid_mthd
- sub b32 $r7 $r6
- shl b32 $r7 3
- add b32 $r5 $r7
- bra #dispatch_loop
-
- // ensure no bits set in reserved fields, INVALID_BITFIELD
- dispatch_valid_mthd:
- sub b32 $r4 $r6
- shl b32 $r4 3
- add b32 $r4 $r5
- ld b32 $r5 D[$r4 + 4]
- and $r5 $r3
- cmpu b32 $r5 0
- bra ne #dispatch_invalid_bitfield
-
- // depending on dispatch flags: execute method, or save data as state
- ld b16 $r5 D[$r4 + 0]
- ld b16 $r6 D[$r4 + 2]
- cmpu b32 $r6 0
- bra ne #dispatch_cmd
- st b32 D[$r5] $r3
- bra #dispatch_done
- dispatch_cmd:
- bclr $flags $p1
- call $r5
- bra $p1 #dispatch_error
- bra #dispatch_done
-
- dispatch_invalid_bitfield:
- or $r2 2
- dispatch_illegal_mthd:
- or $r2 1
-
- // store exception data in SCRATCH0/SCRATCH1, signal hostirq
- dispatch_error:
- mov $r4 0x1000
- iowr I[$r4 + 0x000] $r2
- iowr I[$r4 + 0x100] $r3
- mov $r2 0x40
- iowr I[$r0] $r2
- hostirq_wait:
- iord $r2 I[$r0 + 0x200]
- and $r2 0x40
- cmpu b32 $r2 0
- bra ne #hostirq_wait
-
- dispatch_done:
- mov $r2 0x1d00
- mov $r3 1
- iowr I[$r2] $r3
- ret
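
The dispatch_table above pairs a two-halfword group header (first method index, method count) with one 8-byte entry per method, and dispatch_loop walks those groups to find either a state offset to store into or a command handler to call, plus a mask of valid data bits. A minimal C view of that layout, inferred from the code above; the struct and field names are illustrative only, not part of the driver:

```c
#include <stdint.h>

/* Illustrative C view of the fuc dispatch table walked by dispatch_loop
 * above; names are editorial, not from the driver. */
struct copy_disp_group {
	uint16_t first;              /* first method index (method >> 2)        */
	uint16_t count;              /* number of consecutive methods           */
	struct copy_disp_entry {
		uint16_t target;     /* data-segment offset (state) or code offset */
		uint16_t is_cmd;     /* non-zero: call target, else store data  */
		uint32_t inv_mask;   /* reserved bits; data & inv_mask != 0
		                      * raises INVALID_BITFIELD                 */
	} entry[];                   /* 'count' entries, 8 bytes each           */
};
```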
-
-// No-operation
-//
-// Inputs:
-// $r1: irqh state
-// $r2: hostirq state
-// $r3: data
-// $r4: dispatch table entry
-// Outputs:
-// $r1: irqh state
-// $p1: set on error
-// $r2: hostirq state
-// $r3: data
-cmd_nop:
- ret
-
-// PM_TRIGGER
-//
-// Inputs:
-// $r1: irqh state
-// $r2: hostirq state
-// $r3: data
-// $r4: dispatch table entry
-// Outputs:
-// $r1: irqh state
-// $p1: set on error
-// $r2: hostirq state
-// $r3: data
-cmd_pm_trigger:
- mov $r2 0x2200
- clear b32 $r3
- sethi $r3 0x20000
- iowr I[$r2] $r3
- ret
-
-ifdef(`NVA3',
-// SET_DMA_* method handler
-//
-// Inputs:
-// $r1: irqh state
-// $r2: hostirq state
-// $r3: data
-// $r4: dispatch table entry
-// Outputs:
-// $r1: irqh state
-// $p1: set on error
-// $r2: hostirq state
-// $r3: data
-cmd_dma:
- sub b32 $r4 #dispatch_dma
- shr b32 $r4 1
- bset $r3 0x1e
- st b32 D[$r4 + #ctx_dma] $r3
- add b32 $r4 0x600
- shl b32 $r4 6
- iowr I[$r4] $r3
- ret
-,)
-
-// Calculates the hw swizzle mask and adjusts the surface's xcnt to match
-//
-cmd_exec_set_format:
- // zero out a chunk of the stack to store the swizzle into
- add $sp -0x10
- st b32 D[$sp + 0x00] $r0
- st b32 D[$sp + 0x04] $r0
- st b32 D[$sp + 0x08] $r0
- st b32 D[$sp + 0x0c] $r0
-
- // extract cpp, src_ncomp and dst_ncomp from FORMAT
- ld b32 $r4 D[$r0 + #ctx_format]
- extr $r5 $r4 16:17
- add b32 $r5 1
- extr $r6 $r4 20:21
- add b32 $r6 1
- extr $r7 $r4 24:25
- add b32 $r7 1
-
- // convert FORMAT swizzle mask to hw swizzle mask
- bclr $flags $p2
- clear b32 $r8
- clear b32 $r9
- ncomp_loop:
- and $r10 $r4 0xf
- shr b32 $r4 4
- clear b32 $r11
- bpc_loop:
- cmpu b8 $r10 4
- bra nc #cmp_c0
- mulu $r12 $r10 $r5
- add b32 $r12 $r11
- bset $flags $p2
- bra #bpc_next
- cmp_c0:
- bra ne #cmp_c1
- mov $r12 0x10
- add b32 $r12 $r11
- bra #bpc_next
- cmp_c1:
- cmpu b8 $r10 6
- bra nc #cmp_zero
- mov $r12 0x14
- add b32 $r12 $r11
- bra #bpc_next
- cmp_zero:
- mov $r12 0x80
- bpc_next:
- st b8 D[$sp + $r8] $r12
- add b32 $r8 1
- add b32 $r11 1
- cmpu b32 $r11 $r5
- bra c #bpc_loop
- add b32 $r9 1
- cmpu b32 $r9 $r7
- bra c #ncomp_loop
-
- // SRC_XCNT = (xcnt * src_cpp), or 0 if no src ref in swz (hw will hang)
- mulu $r6 $r5
- st b32 D[$r0 + #ctx_src_cpp] $r6
- ld b32 $r8 D[$r0 + #ctx_xcnt]
- mulu $r6 $r8
- bra $p2 #dst_xcnt
- clear b32 $r6
-
- dst_xcnt:
- mulu $r7 $r5
- st b32 D[$r0 + #ctx_dst_cpp] $r7
- mulu $r7 $r8
-
- mov $r5 0x810
- shl b32 $r5 6
- iowr I[$r5 + 0x000] $r6
- iowr I[$r5 + 0x100] $r7
- add b32 $r5 0x800
- ld b32 $r6 D[$r0 + #ctx_dst_cpp]
- sub b32 $r6 1
- shl b32 $r6 8
- ld b32 $r7 D[$r0 + #ctx_src_cpp]
- sub b32 $r7 1
- or $r6 $r7
- iowr I[$r5 + 0x000] $r6
- add b32 $r5 0x100
- ld b32 $r6 D[$sp + 0x00]
- iowr I[$r5 + 0x000] $r6
- ld b32 $r6 D[$sp + 0x04]
- iowr I[$r5 + 0x100] $r6
- ld b32 $r6 D[$sp + 0x08]
- iowr I[$r5 + 0x200] $r6
- ld b32 $r6 D[$sp + 0x0c]
- iowr I[$r5 + 0x300] $r6
- add b32 $r5 0x400
- ld b32 $r6 D[$r0 + #ctx_swz_const0]
- iowr I[$r5 + 0x000] $r6
- ld b32 $r6 D[$r0 + #ctx_swz_const1]
- iowr I[$r5 + 0x100] $r6
- add $sp 0x10
- ret
-
-// Setup to handle a tiled surface
-//
-// Calculates a number of parameters the hardware requires in order
-// to correctly handle tiling.
-//
-// Offset calculation is performed as follows (Tp/Th/Td from TILE_MODE):
-// nTx = round_up(w * cpp, 1 << Tp) >> Tp
-// nTy = round_up(h, 1 << Th) >> Th
-// Txo = (x * cpp) & ((1 << Tp) - 1)
-// Tx = (x * cpp) >> Tp
-// Tyo = y & ((1 << Th) - 1)
-// Ty = y >> Th
-// Tzo = z & ((1 << Td) - 1)
-// Tz = z >> Td
-//
-// off = (Tzo << Tp << Th) + (Tyo << Tp) + Txo
-// off += ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Td << Th << Tp;
-//
-// Inputs:
-// $r4: hw command (0x104800)
-// $r5: ctx offset adjustment for src/dst selection
-// $p2: set if dst surface
-//
-cmd_exec_set_surface_tiled:
- // translate TILE_MODE into Tp, Th, Td shift values
- ld b32 $r7 D[$r5 + #ctx_src_tile_mode]
- extr $r9 $r7 8:11
- extr $r8 $r7 4:7
-ifdef(`NVA3',
- add b32 $r8 2
-,
- add b32 $r8 3
-)
- extr $r7 $r7 0:3
- cmp b32 $r7 0xe
- bra ne #xtile64
- mov $r7 4
- bra #xtileok
- xtile64:
- xbit $r7 $flags $p2
- add b32 $r7 17
- bset $r4 $r7
- mov $r7 6
- xtileok:
-
- // Op = (x * cpp) & ((1 << Tp) - 1)
- // Tx = (x * cpp) >> Tp
- ld b32 $r10 D[$r5 + #ctx_src_xoff]
- ld b32 $r11 D[$r5 + #ctx_src_cpp]
- mulu $r10 $r11
- mov $r11 1
- shl b32 $r11 $r7
- sub b32 $r11 1
- and $r12 $r10 $r11
- shr b32 $r10 $r7
-
- // Tyo = y & ((1 << Th) - 1)
- // Ty = y >> Th
- ld b32 $r13 D[$r5 + #ctx_src_yoff]
- mov $r14 1
- shl b32 $r14 $r8
- sub b32 $r14 1
- and $r11 $r13 $r14
- shr b32 $r13 $r8
-
- // YTILE = ((1 << Th) << 12) | ((1 << Th) - Tyo)
- add b32 $r14 1
- shl b32 $r15 $r14 12
- sub b32 $r14 $r11
- or $r15 $r14
- xbit $r6 $flags $p2
- add b32 $r6 0x208
- shl b32 $r6 8
- iowr I[$r6 + 0x000] $r15
-
- // Op += Tyo << Tp
- shl b32 $r11 $r7
- add b32 $r12 $r11
-
- // nTx = ((w * cpp) + ((1 << Tp) - 1) >> Tp)
- ld b32 $r15 D[$r5 + #ctx_src_xsize]
- ld b32 $r11 D[$r5 + #ctx_src_cpp]
- mulu $r15 $r11
- mov $r11 1
- shl b32 $r11 $r7
- sub b32 $r11 1
- add b32 $r15 $r11
- shr b32 $r15 $r7
- push $r15
-
- // nTy = (h + ((1 << Th) - 1)) >> Th
- ld b32 $r15 D[$r5 + #ctx_src_ysize]
- mov $r11 1
- shl b32 $r11 $r8
- sub b32 $r11 1
- add b32 $r15 $r11
- shr b32 $r15 $r8
- push $r15
-
- // Tys = Tp + Th
- // CFG_YZ_TILE_SIZE = ((1 << Th) >> 2) << Td
- add b32 $r7 $r8
- sub b32 $r8 2
- mov $r11 1
- shl b32 $r11 $r8
- shl b32 $r11 $r9
-
- // Tzo = z & ((1 << Td) - 1)
- // Tz = z >> Td
- // Op += Tzo << Tys
- // Ts = Tys + Td
- ld b32 $r8 D[$r5 + #ctx_src_zoff]
- mov $r14 1
- shl b32 $r14 $r9
- sub b32 $r14 1
- and $r15 $r8 $r14
- shl b32 $r15 $r7
- add b32 $r12 $r15
- add b32 $r7 $r9
- shr b32 $r8 $r9
-
- // Ot = ((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Ts
- pop $r15
- pop $r9
- mulu $r13 $r9
- add b32 $r10 $r13
- mulu $r8 $r9
- mulu $r8 $r15
- add b32 $r10 $r8
- shl b32 $r10 $r7
-
- // PITCH = (nTx - 1) << Ts
- sub b32 $r9 1
- shl b32 $r9 $r7
- iowr I[$r6 + 0x200] $r9
-
- // SRC_ADDRESS_LOW = (Ot + Op) & 0xffffffff
- // CFG_ADDRESS_HIGH |= ((Ot + Op) >> 32) << 16
- ld b32 $r7 D[$r5 + #ctx_src_address_low]
- ld b32 $r8 D[$r5 + #ctx_src_address_high]
- add b32 $r10 $r12
- add b32 $r7 $r10
- adc b32 $r8 0
- shl b32 $r8 16
- or $r8 $r11
- sub b32 $r6 0x600
- iowr I[$r6 + 0x000] $r7
- add b32 $r6 0x400
- iowr I[$r6 + 0x000] $r8
- ret
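
For readability, a plain C transcription of the offset formulas documented in the comment block of cmd_exec_set_surface_tiled above; the helper and its parameter names are illustrative, and Tp/Th/Td are assumed to be the shift values already decoded from TILE_MODE as the routine does:

```c
#include <stdint.h>

/* Direct transcription of the documented tiled-offset math; not part of
 * the driver, names are illustrative. */
static uint64_t
tiled_offset(uint32_t x, uint32_t y, uint32_t z, uint32_t w, uint32_t h,
	     uint32_t cpp, uint32_t Tp, uint32_t Th, uint32_t Td)
{
	uint32_t nTx = (w * cpp + (1u << Tp) - 1) >> Tp;   /* tiles per row  */
	uint32_t nTy = (h + (1u << Th) - 1) >> Th;         /* tile rows      */
	uint32_t Txo = (x * cpp) & ((1u << Tp) - 1);
	uint32_t Tx  = (x * cpp) >> Tp;
	uint32_t Tyo = y & ((1u << Th) - 1);
	uint32_t Ty  = y >> Th;
	uint32_t Tzo = z & ((1u << Td) - 1);
	uint32_t Tz  = z >> Td;

	/* byte offset inside the tile, then whole tiles in front of it */
	uint64_t off = ((uint64_t)Tzo << Tp << Th) + (Tyo << Tp) + Txo;
	off += (uint64_t)((Tz * nTy * nTx) + (Ty * nTx) + Tx) << Td << Th << Tp;
	return off;
}
```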
-
-// Setup to handle a linear surface
-//
-// Nothing to see here.. Sets ADDRESS and PITCH, pretty non-exciting
-//
-cmd_exec_set_surface_linear:
- xbit $r6 $flags $p2
- add b32 $r6 0x202
- shl b32 $r6 8
- ld b32 $r7 D[$r5 + #ctx_src_address_low]
- iowr I[$r6 + 0x000] $r7
- add b32 $r6 0x400
- ld b32 $r7 D[$r5 + #ctx_src_address_high]
- shl b32 $r7 16
- iowr I[$r6 + 0x000] $r7
- add b32 $r6 0x400
- ld b32 $r7 D[$r5 + #ctx_src_pitch]
- iowr I[$r6 + 0x000] $r7
- ret
-
-// wait for regs to be available for use
-cmd_exec_wait:
- push $r0
- push $r1
- mov $r0 0x800
- shl b32 $r0 6
- loop:
- iord $r1 I[$r0]
- and $r1 1
- bra ne #loop
- pop $r1
- pop $r0
- ret
-
-cmd_exec_query:
- // if QUERY_SHORT not set, write out { -, 0, TIME_LO, TIME_HI }
- xbit $r4 $r3 13
- bra ne #query_counter
- call #cmd_exec_wait
- mov $r4 0x80c
- shl b32 $r4 6
- ld b32 $r5 D[$r0 + #ctx_query_address_low]
- add b32 $r5 4
- iowr I[$r4 + 0x000] $r5
- iowr I[$r4 + 0x100] $r0
- mov $r5 0xc
- iowr I[$r4 + 0x200] $r5
- add b32 $r4 0x400
- ld b32 $r5 D[$r0 + #ctx_query_address_high]
- shl b32 $r5 16
- iowr I[$r4 + 0x000] $r5
- add b32 $r4 0x500
- mov $r5 0x00000b00
- sethi $r5 0x00010000
- iowr I[$r4 + 0x000] $r5
- mov $r5 0x00004040
- shl b32 $r5 1
- sethi $r5 0x80800000
- iowr I[$r4 + 0x100] $r5
- mov $r5 0x00001110
- sethi $r5 0x13120000
- iowr I[$r4 + 0x200] $r5
- mov $r5 0x00001514
- sethi $r5 0x17160000
- iowr I[$r4 + 0x300] $r5
- mov $r5 0x00002601
- sethi $r5 0x00010000
- mov $r4 0x800
- shl b32 $r4 6
- iowr I[$r4 + 0x000] $r5
-
- // write COUNTER
- query_counter:
- call #cmd_exec_wait
- mov $r4 0x80c
- shl b32 $r4 6
- ld b32 $r5 D[$r0 + #ctx_query_address_low]
- iowr I[$r4 + 0x000] $r5
- iowr I[$r4 + 0x100] $r0
- mov $r5 0x4
- iowr I[$r4 + 0x200] $r5
- add b32 $r4 0x400
- ld b32 $r5 D[$r0 + #ctx_query_address_high]
- shl b32 $r5 16
- iowr I[$r4 + 0x000] $r5
- add b32 $r4 0x500
- mov $r5 0x00000300
- iowr I[$r4 + 0x000] $r5
- mov $r5 0x00001110
- sethi $r5 0x13120000
- iowr I[$r4 + 0x100] $r5
- ld b32 $r5 D[$r0 + #ctx_query_counter]
- add b32 $r4 0x500
- iowr I[$r4 + 0x000] $r5
- mov $r5 0x00002601
- sethi $r5 0x00010000
- mov $r4 0x800
- shl b32 $r4 6
- iowr I[$r4 + 0x000] $r5
- ret
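
Read together with the comment at the top of cmd_exec_query, the full (non-QUERY_SHORT) path leaves a 16-byte record at the query address: the counter word written by query_counter, a zero word, then the two timestamp words. One possible sketch of that layout; the struct and field names are purely illustrative:

```c
#include <stdint.h>

/* One reading of the 16-byte query record described by the
 * "{ -, 0, TIME_LO, TIME_HI }" comment above; names are illustrative. */
struct copy_query_record {
	uint32_t counter;    /* ctx_query_counter, written by query_counter */
	uint32_t zero;       /* written as zero                             */
	uint32_t time_lo;    /* timestamp, low 32 bits                      */
	uint32_t time_hi;    /* timestamp, high 32 bits                     */
};
```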
-
-// Execute a copy operation
-//
-// Inputs:
-// $r1: irqh state
-// $r2: hostirq state
-// $r3: data
-// 000002000 QUERY_SHORT
-// 000001000 QUERY
-// 000000100 DST_LINEAR
-// 000000010 SRC_LINEAR
-// 000000001 FORMAT
-// $r4: dispatch table entry
-// Outputs:
-// $r1: irqh state
-// $p1: set on error
-// $r2: hostirq state
-// $r3: data
-cmd_exec:
- call #cmd_exec_wait
-
- // if format requested, call function to calculate it, otherwise
- // fill in cpp/xcnt for both surfaces as if (cpp == 1)
- xbit $r15 $r3 0
- bra e #cmd_exec_no_format
- call #cmd_exec_set_format
- mov $r4 0x200
- bra #cmd_exec_init_src_surface
- cmd_exec_no_format:
- mov $r6 0x810
- shl b32 $r6 6
- mov $r7 1
- st b32 D[$r0 + #ctx_src_cpp] $r7
- st b32 D[$r0 + #ctx_dst_cpp] $r7
- ld b32 $r7 D[$r0 + #ctx_xcnt]
- iowr I[$r6 + 0x000] $r7
- iowr I[$r6 + 0x100] $r7
- clear b32 $r4
-
- cmd_exec_init_src_surface:
- bclr $flags $p2
- clear b32 $r5
- xbit $r15 $r3 4
- bra e #src_tiled
- call #cmd_exec_set_surface_linear
- bra #cmd_exec_init_dst_surface
- src_tiled:
- call #cmd_exec_set_surface_tiled
- bset $r4 7
-
- cmd_exec_init_dst_surface:
- bset $flags $p2
- mov $r5 #ctx_dst_address_high - #ctx_src_address_high
- xbit $r15 $r3 8
- bra e #dst_tiled
- call #cmd_exec_set_surface_linear
- bra #cmd_exec_kick
- dst_tiled:
- call #cmd_exec_set_surface_tiled
- bset $r4 8
-
- cmd_exec_kick:
- mov $r5 0x800
- shl b32 $r5 6
- ld b32 $r6 D[$r0 + #ctx_ycnt]
- iowr I[$r5 + 0x100] $r6
- mov $r6 0x0041
- // SRC_TARGET = 1, DST_TARGET = 2
- sethi $r6 0x44000000
- or $r4 $r6
- iowr I[$r5] $r4
-
- // if requested, queue up a QUERY write after the copy has completed
- xbit $r15 $r3 12
- bra e #cmd_exec_done
- call #cmd_exec_query
-
- cmd_exec_done:
- ret
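
The EXEC data bits listed in the header comment of cmd_exec correspond to the xbit tests in the routine above (bits 0, 4, 8, 12 and 13). As a compact reference, an illustrative C encoding of those flags; the names are editorial, not from the driver:

```c
/* Illustrative names for the EXEC method data bits tested by cmd_exec
 * above; values come from the header comment, names are editorial. */
enum copy_exec_flags {
	COPY_EXEC_FORMAT      = 0x00000001,  /* bit 0:  run cmd_exec_set_format  */
	COPY_EXEC_SRC_LINEAR  = 0x00000010,  /* bit 4:  source surface is linear */
	COPY_EXEC_DST_LINEAR  = 0x00000100,  /* bit 8:  destination is linear    */
	COPY_EXEC_QUERY       = 0x00001000,  /* bit 12: write query after copy   */
	COPY_EXEC_QUERY_SHORT = 0x00002000,  /* bit 13: counter-only query write */
};
```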
-
-// Flush write cache
-//
-// Inputs:
-// $r1: irqh state
-// $r2: hostirq state
-// $r3: data
-// $r4: dispatch table entry
-// Outputs:
-// $r1: irqh state
-// $p1: set on error
-// $r2: hostirq state
-// $r3: data
-cmd_wrcache_flush:
- mov $r2 0x2200
- clear b32 $r3
- sethi $r3 0x10000
- iowr I[$r2] $r3
- ret
-
-.align 0x100
+++ /dev/null
-u32 nva3_pcopy_data[] = {
-/* 0x0000: ctx_object */
- 0x00000000,
-/* 0x0004: ctx_dma */
-/* 0x0004: ctx_dma_query */
- 0x00000000,
-/* 0x0008: ctx_dma_src */
- 0x00000000,
-/* 0x000c: ctx_dma_dst */
- 0x00000000,
-/* 0x0010: ctx_query_address_high */
- 0x00000000,
-/* 0x0014: ctx_query_address_low */
- 0x00000000,
-/* 0x0018: ctx_query_counter */
- 0x00000000,
-/* 0x001c: ctx_src_address_high */
- 0x00000000,
-/* 0x0020: ctx_src_address_low */
- 0x00000000,
-/* 0x0024: ctx_src_pitch */
- 0x00000000,
-/* 0x0028: ctx_src_tile_mode */
- 0x00000000,
-/* 0x002c: ctx_src_xsize */
- 0x00000000,
-/* 0x0030: ctx_src_ysize */
- 0x00000000,
-/* 0x0034: ctx_src_zsize */
- 0x00000000,
-/* 0x0038: ctx_src_zoff */
- 0x00000000,
-/* 0x003c: ctx_src_xoff */
- 0x00000000,
-/* 0x0040: ctx_src_yoff */
- 0x00000000,
-/* 0x0044: ctx_src_cpp */
- 0x00000000,
-/* 0x0048: ctx_dst_address_high */
- 0x00000000,
-/* 0x004c: ctx_dst_address_low */
- 0x00000000,
-/* 0x0050: ctx_dst_pitch */
- 0x00000000,
-/* 0x0054: ctx_dst_tile_mode */
- 0x00000000,
-/* 0x0058: ctx_dst_xsize */
- 0x00000000,
-/* 0x005c: ctx_dst_ysize */
- 0x00000000,
-/* 0x0060: ctx_dst_zsize */
- 0x00000000,
-/* 0x0064: ctx_dst_zoff */
- 0x00000000,
-/* 0x0068: ctx_dst_xoff */
- 0x00000000,
-/* 0x006c: ctx_dst_yoff */
- 0x00000000,
-/* 0x0070: ctx_dst_cpp */
- 0x00000000,
-/* 0x0074: ctx_format */
- 0x00000000,
-/* 0x0078: ctx_swz_const0 */
- 0x00000000,
-/* 0x007c: ctx_swz_const1 */
- 0x00000000,
-/* 0x0080: ctx_xcnt */
- 0x00000000,
-/* 0x0084: ctx_ycnt */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-/* 0x0100: dispatch_table */
- 0x00010000,
- 0x00000000,
- 0x00000000,
- 0x00010040,
- 0x00010160,
- 0x00000000,
- 0x00010050,
- 0x00010162,
- 0x00000000,
- 0x00030060,
-/* 0x0128: dispatch_dma */
- 0x00010170,
- 0x00000000,
- 0x00010170,
- 0x00000000,
- 0x00010170,
- 0x00000000,
- 0x00070080,
- 0x00000028,
- 0xfffff000,
- 0x0000002c,
- 0xfff80000,
- 0x00000030,
- 0xffffe000,
- 0x00000034,
- 0xfffff800,
- 0x00000038,
- 0xfffff000,
- 0x0000003c,
- 0xfff80000,
- 0x00000040,
- 0xffffe000,
- 0x00070088,
- 0x00000054,
- 0xfffff000,
- 0x00000058,
- 0xfff80000,
- 0x0000005c,
- 0xffffe000,
- 0x00000060,
- 0xfffff800,
- 0x00000064,
- 0xfffff000,
- 0x00000068,
- 0xfff80000,
- 0x0000006c,
- 0xffffe000,
- 0x000200c0,
- 0x00010492,
- 0x00000000,
- 0x0001051b,
- 0x00000000,
- 0x000e00c3,
- 0x0000001c,
- 0xffffff00,
- 0x00000020,
- 0x00000000,
- 0x00000048,
- 0xffffff00,
- 0x0000004c,
- 0x00000000,
- 0x00000024,
- 0xfff80000,
- 0x00000050,
- 0xfff80000,
- 0x00000080,
- 0xffff0000,
- 0x00000084,
- 0xffffe000,
- 0x00000074,
- 0xfccc0000,
- 0x00000078,
- 0x00000000,
- 0x0000007c,
- 0x00000000,
- 0x00000010,
- 0xffffff00,
- 0x00000014,
- 0x00000000,
- 0x00000018,
- 0x00000000,
- 0x00000800,
-};
-
-u32 nva3_pcopy_code[] = {
-/* 0x0000: main */
- 0x04fe04bd,
- 0x3517f000,
- 0xf10010fe,
- 0xf1040017,
- 0xf0fff327,
- 0x12d00023,
- 0x0c25f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x27f01200,
- 0x0012d003,
-/* 0x002f: spin */
- 0xf40031f4,
- 0x0ef40028,
-/* 0x0035: ih */
- 0x8001cffd,
- 0xf40812c4,
- 0x21f4060b,
-/* 0x0041: ih_no_chsw */
- 0x0412c472,
- 0xf4060bf4,
-/* 0x004a: ih_no_cmd */
- 0x11c4c321,
- 0x4001d00c,
-/* 0x0052: swctx */
- 0x47f101f8,
- 0x4bfe7700,
- 0x0007fe00,
- 0xf00204b9,
- 0x01f40643,
- 0x0604fa09,
-/* 0x006b: swctx_load */
- 0xfa060ef4,
-/* 0x006e: swctx_done */
- 0x03f80504,
-/* 0x0072: chsw */
- 0x27f100f8,
- 0x23cf1400,
- 0x1e3fc800,
- 0xf4170bf4,
- 0x21f40132,
- 0x1e3af052,
- 0xf00023d0,
- 0x24d00147,
-/* 0x0093: chsw_no_unload */
- 0xcf00f880,
- 0x3dc84023,
- 0x220bf41e,
- 0xf40131f4,
- 0x57f05221,
- 0x0367f004,
-/* 0x00a8: chsw_load_ctx_dma */
- 0xa07856bc,
- 0xb6018068,
- 0x87d00884,
- 0x0162b600,
-/* 0x00bb: chsw_finish_load */
- 0xf0f018f4,
- 0x23d00237,
-/* 0x00c3: dispatch */
- 0xf100f880,
- 0xcf190037,
- 0x33cf4032,
- 0xff24e400,
- 0x1024b607,
- 0x010057f1,
- 0x74bd64bd,
-/* 0x00dc: dispatch_loop */
- 0x58005658,
- 0x50b60157,
- 0x0446b804,
- 0xbb4d08f4,
- 0x47b80076,
- 0x0f08f404,
- 0xb60276bb,
- 0x57bb0374,
- 0xdf0ef400,
-/* 0x0100: dispatch_valid_mthd */
- 0xb60246bb,
- 0x45bb0344,
- 0x01459800,
- 0xb00453fd,
- 0x1bf40054,
- 0x00455820,
- 0xb0014658,
- 0x1bf40064,
- 0x00538009,
-/* 0x0127: dispatch_cmd */
- 0xf4300ef4,
- 0x55f90132,
- 0xf40c01f4,
-/* 0x0132: dispatch_invalid_bitfield */
- 0x25f0250e,
-/* 0x0135: dispatch_illegal_mthd */
- 0x0125f002,
-/* 0x0138: dispatch_error */
- 0x100047f1,
- 0xd00042d0,
- 0x27f04043,
- 0x0002d040,
-/* 0x0148: hostirq_wait */
- 0xf08002cf,
- 0x24b04024,
- 0xf71bf400,
-/* 0x0154: dispatch_done */
- 0x1d0027f1,
- 0xd00137f0,
- 0x00f80023,
-/* 0x0160: cmd_nop */
-/* 0x0162: cmd_pm_trigger */
- 0x27f100f8,
- 0x34bd2200,
- 0xd00233f0,
- 0x00f80023,
-/* 0x0170: cmd_dma */
- 0x012842b7,
- 0xf00145b6,
- 0x43801e39,
- 0x0040b701,
- 0x0644b606,
- 0xf80043d0,
-/* 0x0189: cmd_exec_set_format */
- 0xf030f400,
- 0xb00001b0,
- 0x01b00101,
- 0x0301b002,
- 0xc71d0498,
- 0x50b63045,
- 0x3446c701,
- 0xc70160b6,
- 0x70b63847,
- 0x0232f401,
- 0x94bd84bd,
-/* 0x01b4: ncomp_loop */
- 0xb60f4ac4,
- 0xb4bd0445,
-/* 0x01bc: bpc_loop */
- 0xf404a430,
- 0xa5ff0f18,
- 0x00cbbbc0,
- 0xf40231f4,
-/* 0x01ce: cmp_c0 */
- 0x1bf4220e,
- 0x10c7f00c,
- 0xf400cbbb,
-/* 0x01da: cmp_c1 */
- 0xa430160e,
- 0x0c18f406,
- 0xbb14c7f0,
- 0x0ef400cb,
-/* 0x01e9: cmp_zero */
- 0x80c7f107,
-/* 0x01ed: bpc_next */
- 0x01c83800,
- 0xb60180b6,
- 0xb5b801b0,
- 0xc308f404,
- 0xb80190b6,
- 0x08f40497,
- 0x0065fdb2,
- 0x98110680,
- 0x68fd2008,
- 0x0502f400,
-/* 0x0216: dst_xcnt */
- 0x75fd64bd,
- 0x1c078000,
- 0xf10078fd,
- 0xb6081057,
- 0x56d00654,
- 0x4057d000,
- 0x080050b7,
- 0xb61c0698,
- 0x64b60162,
- 0x11079808,
- 0xfd0172b6,
- 0x56d00567,
- 0x0050b700,
- 0x0060b401,
- 0xb40056d0,
- 0x56d00160,
- 0x0260b440,
- 0xb48056d0,
- 0x56d00360,
- 0x0050b7c0,
- 0x1e069804,
- 0x980056d0,
- 0x56d01f06,
- 0x1030f440,
-/* 0x0276: cmd_exec_set_surface_tiled */
- 0x579800f8,
- 0x6879c70a,
- 0xb66478c7,
- 0x77c70280,
- 0x0e76b060,
- 0xf0091bf4,
- 0x0ef40477,
-/* 0x0291: xtile64 */
- 0x027cf00f,
- 0xfd1170b6,
- 0x77f00947,
-/* 0x029d: xtileok */
- 0x0f5a9806,
- 0xfd115b98,
- 0xb7f000ab,
- 0x04b7bb01,
- 0xff01b2b6,
- 0xa7bbc4ab,
- 0x105d9805,
- 0xbb01e7f0,
- 0xe2b604e8,
- 0xb4deff01,
- 0xb605d8bb,
- 0xef9401e0,
- 0x02ebbb0c,
- 0xf005fefd,
- 0x60b7026c,
- 0x64b60208,
- 0x006fd008,
- 0xbb04b7bb,
- 0x5f9800cb,
- 0x115b980b,
- 0xf000fbfd,
- 0xb7bb01b7,
- 0x01b2b604,
- 0xbb00fbbb,
- 0xf0f905f7,
- 0xf00c5f98,
- 0xb8bb01b7,
- 0x01b2b604,
- 0xbb00fbbb,
- 0xf0f905f8,
- 0xb60078bb,
- 0xb7f00282,
- 0x04b8bb01,
- 0x9804b9bb,
- 0xe7f00e58,
- 0x04e9bb01,
- 0xff01e2b6,
- 0xf7bbf48e,
- 0x00cfbb04,
- 0xbb0079bb,
- 0xf0fc0589,
- 0xd9fd90fc,
- 0x00adbb00,
- 0xfd0089fd,
- 0xa8bb008f,
- 0x04a7bb00,
- 0xbb0192b6,
- 0x69d00497,
- 0x08579880,
- 0xbb075898,
- 0x7abb00ac,
- 0x0081b600,
- 0xfd1084b6,
- 0x62b7058b,
- 0x67d00600,
- 0x0060b700,
- 0x0068d004,
-/* 0x0382: cmd_exec_set_surface_linear */
- 0x6cf000f8,
- 0x0260b702,
- 0x0864b602,
- 0xd0085798,
- 0x60b70067,
- 0x57980400,
- 0x1074b607,
- 0xb70067d0,
- 0x98040060,
- 0x67d00957,
-/* 0x03ab: cmd_exec_wait */
- 0xf900f800,
- 0xf110f900,
- 0xb6080007,
-/* 0x03b6: loop */
- 0x01cf0604,
- 0x0114f000,
- 0xfcfa1bf4,
- 0xf800fc10,
-/* 0x03c5: cmd_exec_query */
- 0x0d34c800,
- 0xf5701bf4,
- 0xf103ab21,
- 0xb6080c47,
- 0x05980644,
- 0x0450b605,
- 0xd00045d0,
- 0x57f04040,
- 0x8045d00c,
- 0x040040b7,
- 0xb6040598,
- 0x45d01054,
- 0x0040b700,
- 0x0057f105,
- 0x0153f00b,
- 0xf10045d0,
- 0xb6404057,
- 0x53f10154,
- 0x45d08080,
- 0x1057f140,
- 0x1253f111,
- 0x8045d013,
- 0x151457f1,
- 0x171653f1,
- 0xf1c045d0,
- 0xf0260157,
- 0x47f10153,
- 0x44b60800,
- 0x0045d006,
-/* 0x0438: query_counter */
- 0x03ab21f5,
- 0x080c47f1,
- 0x980644b6,
- 0x45d00505,
- 0x4040d000,
- 0xd00457f0,
- 0x40b78045,
- 0x05980400,
- 0x1054b604,
- 0xb70045d0,
- 0xf1050040,
- 0xd0030057,
- 0x57f10045,
- 0x53f11110,
- 0x45d01312,
- 0x06059840,
- 0x050040b7,
- 0xf10045d0,
- 0xf0260157,
- 0x47f10153,
- 0x44b60800,
- 0x0045d006,
-/* 0x0492: cmd_exec */
- 0x21f500f8,
- 0x3fc803ab,
- 0x0e0bf400,
- 0x018921f5,
- 0x020047f1,
-/* 0x04a7: cmd_exec_no_format */
- 0xf11e0ef4,
- 0xb6081067,
- 0x77f00664,
- 0x11078001,
- 0x981c0780,
- 0x67d02007,
- 0x4067d000,
-/* 0x04c2: cmd_exec_init_src_surface */
- 0x32f444bd,
- 0xc854bd02,
- 0x0bf4043f,
- 0x8221f50a,
- 0x0a0ef403,
-/* 0x04d4: src_tiled */
- 0x027621f5,
-/* 0x04db: cmd_exec_init_dst_surface */
- 0xf40749f0,
- 0x57f00231,
- 0x083fc82c,
- 0xf50a0bf4,
- 0xf4038221,
-/* 0x04ee: dst_tiled */
- 0x21f50a0e,
- 0x49f00276,
-/* 0x04f5: cmd_exec_kick */
- 0x0057f108,
- 0x0654b608,
- 0xd0210698,
- 0x67f04056,
- 0x0063f141,
- 0x0546fd44,
- 0xc80054d0,
- 0x0bf40c3f,
- 0xc521f507,
-/* 0x0519: cmd_exec_done */
-/* 0x051b: cmd_wrcache_flush */
- 0xf100f803,
- 0xbd220027,
- 0x0133f034,
- 0xf80023d0,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
#include "drmP.h"
#include "nouveau_drv.h"
-#include "nouveau_bios.h"
+#include <nouveau_bios.h>
#include "nouveau_pm.h"
static u32 read_clk(struct drm_device *, int, bool);
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_util.h"
-#include "nouveau_vm.h"
-#include "nouveau_ramht.h"
-#include "nvc0_copy.fuc.h"
-
-struct nvc0_copy_engine {
- struct nouveau_exec_engine base;
- u32 irq;
- u32 pmc;
- u32 fuc;
- u32 ctx;
-};
-
-static int
-nvc0_copy_context_new(struct nouveau_channel *chan, int engine)
-{
- struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_gpuobj *ramin = chan->ramin;
- struct nouveau_gpuobj *ctx = NULL;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 256, 256,
- NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER |
- NVOBJ_FLAG_ZERO_ALLOC, &ctx);
- if (ret)
- return ret;
-
- nv_wo32(ramin, pcopy->ctx + 0, lower_32_bits(ctx->linst));
- nv_wo32(ramin, pcopy->ctx + 4, upper_32_bits(ctx->linst));
- dev_priv->engine.instmem.flush(dev);
-
- chan->engctx[engine] = ctx;
- return 0;
-}
-
-static int
-nvc0_copy_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- return 0;
-}
-
-static void
-nvc0_copy_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nvc0_copy_engine *pcopy = nv_engine(chan->dev, engine);
- struct nouveau_gpuobj *ctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
- u32 inst;
-
- inst = (chan->ramin->vinst >> 12);
- inst |= 0x40000000;
-
- /* disable fifo access */
- nv_wr32(dev, pcopy->fuc + 0x048, 0x00000000);
- /* mark channel as unloaded if it's currently active */
- if (nv_rd32(dev, pcopy->fuc + 0x050) == inst)
- nv_mask(dev, pcopy->fuc + 0x050, 0x40000000, 0x00000000);
- /* mark next channel as invalid if it's about to be loaded */
- if (nv_rd32(dev, pcopy->fuc + 0x054) == inst)
- nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
- /* restore fifo access */
- nv_wr32(dev, pcopy->fuc + 0x048, 0x00000003);
-
- nv_wo32(chan->ramin, pcopy->ctx + 0, 0x00000000);
- nv_wo32(chan->ramin, pcopy->ctx + 4, 0x00000000);
- nouveau_gpuobj_ref(NULL, &ctx);
-
- chan->engctx[engine] = ctx;
-}
-
-static int
-nvc0_copy_init(struct drm_device *dev, int engine)
-{
- struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
- int i;
-
- nv_mask(dev, 0x000200, pcopy->pmc, 0x00000000);
- nv_mask(dev, 0x000200, pcopy->pmc, pcopy->pmc);
- nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
-
- nv_wr32(dev, pcopy->fuc + 0x1c0, 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_data) / 4; i++)
- nv_wr32(dev, pcopy->fuc + 0x1c4, nvc0_pcopy_data[i]);
-
- nv_wr32(dev, pcopy->fuc + 0x180, 0x01000000);
- for (i = 0; i < sizeof(nvc0_pcopy_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, pcopy->fuc + 0x188, i >> 6);
- nv_wr32(dev, pcopy->fuc + 0x184, nvc0_pcopy_code[i]);
- }
-
- nv_wr32(dev, pcopy->fuc + 0x084, engine - NVOBJ_ENGINE_COPY0);
- nv_wr32(dev, pcopy->fuc + 0x10c, 0x00000000);
- nv_wr32(dev, pcopy->fuc + 0x104, 0x00000000); /* ENTRY */
- nv_wr32(dev, pcopy->fuc + 0x100, 0x00000002); /* TRIGGER */
- return 0;
-}
-
-static int
-nvc0_copy_fini(struct drm_device *dev, int engine, bool suspend)
-{
- struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-
- nv_mask(dev, pcopy->fuc + 0x048, 0x00000003, 0x00000000);
-
- /* trigger fuc context unload */
- nv_wait(dev, pcopy->fuc + 0x008, 0x0000000c, 0x00000000);
- nv_mask(dev, pcopy->fuc + 0x054, 0x40000000, 0x00000000);
- nv_wr32(dev, pcopy->fuc + 0x000, 0x00000008);
- nv_wait(dev, pcopy->fuc + 0x008, 0x00000008, 0x00000000);
-
- nv_wr32(dev, pcopy->fuc + 0x014, 0xffffffff);
- return 0;
-}
-
-static struct nouveau_enum nvc0_copy_isr_error_name[] = {
- { 0x0001, "ILLEGAL_MTHD" },
- { 0x0002, "INVALID_ENUM" },
- { 0x0003, "INVALID_BITFIELD" },
- {}
-};
-
-static void
-nvc0_copy_isr(struct drm_device *dev, int engine)
-{
- struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
- u32 disp = nv_rd32(dev, pcopy->fuc + 0x01c);
- u32 stat = nv_rd32(dev, pcopy->fuc + 0x008) & disp & ~(disp >> 16);
- u64 inst = (u64)(nv_rd32(dev, pcopy->fuc + 0x050) & 0x0fffffff) << 12;
- u32 chid = nvc0_graph_isr_chid(dev, inst);
- u32 ssta = nv_rd32(dev, pcopy->fuc + 0x040) & 0x0000ffff;
- u32 addr = nv_rd32(dev, pcopy->fuc + 0x040) >> 16;
- u32 mthd = (addr & 0x07ff) << 2;
- u32 subc = (addr & 0x3800) >> 11;
- u32 data = nv_rd32(dev, pcopy->fuc + 0x044);
-
- if (stat & 0x00000040) {
- NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
- nouveau_enum_print(nvc0_copy_isr_error_name, ssta);
- printk("] ch %d [0x%010llx] subc %d mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, mthd, data);
- nv_wr32(dev, pcopy->fuc + 0x004, 0x00000040);
- stat &= ~0x00000040;
- }
-
- if (stat) {
- NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
- nv_wr32(dev, pcopy->fuc + 0x004, stat);
- }
-}
-
-static void
-nvc0_copy_isr_0(struct drm_device *dev)
-{
- nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY0);
-}
-
-static void
-nvc0_copy_isr_1(struct drm_device *dev)
-{
- nvc0_copy_isr(dev, NVOBJ_ENGINE_COPY1);
-}
-
-static void
-nvc0_copy_destroy(struct drm_device *dev, int engine)
-{
- struct nvc0_copy_engine *pcopy = nv_engine(dev, engine);
-
- nouveau_irq_unregister(dev, pcopy->irq);
-
- if (engine == NVOBJ_ENGINE_COPY0)
- NVOBJ_ENGINE_DEL(dev, COPY0);
- else
- NVOBJ_ENGINE_DEL(dev, COPY1);
- kfree(pcopy);
-}
-
-int
-nvc0_copy_create(struct drm_device *dev, int engine)
-{
- struct nvc0_copy_engine *pcopy;
-
- pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
- if (!pcopy)
- return -ENOMEM;
-
- pcopy->base.destroy = nvc0_copy_destroy;
- pcopy->base.init = nvc0_copy_init;
- pcopy->base.fini = nvc0_copy_fini;
- pcopy->base.context_new = nvc0_copy_context_new;
- pcopy->base.context_del = nvc0_copy_context_del;
- pcopy->base.object_new = nvc0_copy_object_new;
-
- if (engine == 0) {
- pcopy->irq = 5;
- pcopy->pmc = 0x00000040;
- pcopy->fuc = 0x104000;
- pcopy->ctx = 0x0230;
- nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_0);
- NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
- NVOBJ_CLASS(dev, 0x90b5, COPY0);
- } else {
- pcopy->irq = 6;
- pcopy->pmc = 0x00000080;
- pcopy->fuc = 0x105000;
- pcopy->ctx = 0x0240;
- nouveau_irq_register(dev, pcopy->irq, nvc0_copy_isr_1);
- NVOBJ_ENGINE_ADD(dev, COPY1, &pcopy->base);
- NVOBJ_CLASS(dev, 0x90b8, COPY1);
- }
-
- return 0;
-}
+++ /dev/null
-u32 nvc0_pcopy_data[] = {
-/* 0x0000: ctx_object */
- 0x00000000,
-/* 0x0004: ctx_query_address_high */
- 0x00000000,
-/* 0x0008: ctx_query_address_low */
- 0x00000000,
-/* 0x000c: ctx_query_counter */
- 0x00000000,
-/* 0x0010: ctx_src_address_high */
- 0x00000000,
-/* 0x0014: ctx_src_address_low */
- 0x00000000,
-/* 0x0018: ctx_src_pitch */
- 0x00000000,
-/* 0x001c: ctx_src_tile_mode */
- 0x00000000,
-/* 0x0020: ctx_src_xsize */
- 0x00000000,
-/* 0x0024: ctx_src_ysize */
- 0x00000000,
-/* 0x0028: ctx_src_zsize */
- 0x00000000,
-/* 0x002c: ctx_src_zoff */
- 0x00000000,
-/* 0x0030: ctx_src_xoff */
- 0x00000000,
-/* 0x0034: ctx_src_yoff */
- 0x00000000,
-/* 0x0038: ctx_src_cpp */
- 0x00000000,
-/* 0x003c: ctx_dst_address_high */
- 0x00000000,
-/* 0x0040: ctx_dst_address_low */
- 0x00000000,
-/* 0x0044: ctx_dst_pitch */
- 0x00000000,
-/* 0x0048: ctx_dst_tile_mode */
- 0x00000000,
-/* 0x004c: ctx_dst_xsize */
- 0x00000000,
-/* 0x0050: ctx_dst_ysize */
- 0x00000000,
-/* 0x0054: ctx_dst_zsize */
- 0x00000000,
-/* 0x0058: ctx_dst_zoff */
- 0x00000000,
-/* 0x005c: ctx_dst_xoff */
- 0x00000000,
-/* 0x0060: ctx_dst_yoff */
- 0x00000000,
-/* 0x0064: ctx_dst_cpp */
- 0x00000000,
-/* 0x0068: ctx_format */
- 0x00000000,
-/* 0x006c: ctx_swz_const0 */
- 0x00000000,
-/* 0x0070: ctx_swz_const1 */
- 0x00000000,
-/* 0x0074: ctx_xcnt */
- 0x00000000,
-/* 0x0078: ctx_ycnt */
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-/* 0x0100: dispatch_table */
- 0x00010000,
- 0x00000000,
- 0x00000000,
- 0x00010040,
- 0x0001019f,
- 0x00000000,
- 0x00010050,
- 0x000101a1,
- 0x00000000,
- 0x00070080,
- 0x0000001c,
- 0xfffff000,
- 0x00000020,
- 0xfff80000,
- 0x00000024,
- 0xffffe000,
- 0x00000028,
- 0xfffff800,
- 0x0000002c,
- 0xfffff000,
- 0x00000030,
- 0xfff80000,
- 0x00000034,
- 0xffffe000,
- 0x00070088,
- 0x00000048,
- 0xfffff000,
- 0x0000004c,
- 0xfff80000,
- 0x00000050,
- 0xffffe000,
- 0x00000054,
- 0xfffff800,
- 0x00000058,
- 0xfffff000,
- 0x0000005c,
- 0xfff80000,
- 0x00000060,
- 0xffffe000,
- 0x000200c0,
- 0x000104b8,
- 0x00000000,
- 0x00010541,
- 0x00000000,
- 0x000e00c3,
- 0x00000010,
- 0xffffff00,
- 0x00000014,
- 0x00000000,
- 0x0000003c,
- 0xffffff00,
- 0x00000040,
- 0x00000000,
- 0x00000018,
- 0xfff80000,
- 0x00000044,
- 0xfff80000,
- 0x00000074,
- 0xffff0000,
- 0x00000078,
- 0xffffe000,
- 0x00000068,
- 0xfccc0000,
- 0x0000006c,
- 0x00000000,
- 0x00000070,
- 0x00000000,
- 0x00000004,
- 0xffffff00,
- 0x00000008,
- 0x00000000,
- 0x0000000c,
- 0x00000000,
- 0x00000800,
-};
-
-u32 nvc0_pcopy_code[] = {
-/* 0x0000: main */
- 0x04fe04bd,
- 0x3517f000,
- 0xf10010fe,
- 0xf1040017,
- 0xf0fff327,
- 0x12d00023,
- 0x0c25f0c0,
- 0xf40012d0,
- 0x17f11031,
- 0x27f01200,
- 0x0012d003,
-/* 0x002f: spin */
- 0xf40031f4,
- 0x0ef40028,
-/* 0x0035: ih */
- 0x8001cffd,
- 0xf40812c4,
- 0x21f4060b,
-/* 0x0041: ih_no_chsw */
- 0x0412c4ca,
- 0xf5070bf4,
-/* 0x004b: ih_no_cmd */
- 0xc4010221,
- 0x01d00c11,
-/* 0x0053: swctx */
- 0xf101f840,
- 0xfe770047,
- 0x47f1004b,
- 0x44cf2100,
- 0x0144f000,
- 0xb60444b6,
- 0xf7f13040,
- 0xf4b6061c,
- 0x1457f106,
- 0x00f5d101,
- 0xb6043594,
- 0x57fe0250,
- 0x0145fe00,
- 0x010052b7,
- 0x00ff67f1,
- 0x56fd60bd,
- 0x0253f004,
- 0xf80545fa,
- 0x0053f003,
- 0xd100e7f0,
- 0x549800fe,
- 0x0845b600,
- 0xb6015698,
- 0x46fd1864,
- 0x0047fe05,
- 0xf00204b9,
- 0x01f40643,
- 0x0604fa09,
-/* 0x00c3: swctx_load */
- 0xfa060ef4,
-/* 0x00c6: swctx_done */
- 0x03f80504,
-/* 0x00ca: chsw */
- 0x27f100f8,
- 0x23cf1400,
- 0x1e3fc800,
- 0xf4170bf4,
- 0x21f40132,
- 0x1e3af053,
- 0xf00023d0,
- 0x24d00147,
-/* 0x00eb: chsw_no_unload */
- 0xcf00f880,
- 0x3dc84023,
- 0x090bf41e,
- 0xf40131f4,
-/* 0x00fa: chsw_finish_load */
- 0x37f05321,
- 0x8023d002,
-/* 0x0102: dispatch */
- 0x37f100f8,
- 0x32cf1900,
- 0x0033cf40,
- 0x07ff24e4,
- 0xf11024b6,
- 0xbd010057,
-/* 0x011b: dispatch_loop */
- 0x5874bd64,
- 0x57580056,
- 0x0450b601,
- 0xf40446b8,
- 0x76bb4d08,
- 0x0447b800,
- 0xbb0f08f4,
- 0x74b60276,
- 0x0057bb03,
-/* 0x013f: dispatch_valid_mthd */
- 0xbbdf0ef4,
- 0x44b60246,
- 0x0045bb03,
- 0xfd014598,
- 0x54b00453,
- 0x201bf400,
- 0x58004558,
- 0x64b00146,
- 0x091bf400,
- 0xf4005380,
-/* 0x0166: dispatch_cmd */
- 0x32f4300e,
- 0xf455f901,
- 0x0ef40c01,
-/* 0x0171: dispatch_invalid_bitfield */
- 0x0225f025,
-/* 0x0174: dispatch_illegal_mthd */
-/* 0x0177: dispatch_error */
- 0xf10125f0,
- 0xd0100047,
- 0x43d00042,
- 0x4027f040,
-/* 0x0187: hostirq_wait */
- 0xcf0002d0,
- 0x24f08002,
- 0x0024b040,
-/* 0x0193: dispatch_done */
- 0xf1f71bf4,
- 0xf01d0027,
- 0x23d00137,
-/* 0x019f: cmd_nop */
- 0xf800f800,
-/* 0x01a1: cmd_pm_trigger */
- 0x0027f100,
- 0xf034bd22,
- 0x23d00233,
-/* 0x01af: cmd_exec_set_format */
- 0xf400f800,
- 0x01b0f030,
- 0x0101b000,
- 0xb00201b0,
- 0x04980301,
- 0x3045c71a,
- 0xc70150b6,
- 0x60b63446,
- 0x3847c701,
- 0xf40170b6,
- 0x84bd0232,
-/* 0x01da: ncomp_loop */
- 0x4ac494bd,
- 0x0445b60f,
-/* 0x01e2: bpc_loop */
- 0xa430b4bd,
- 0x0f18f404,
- 0xbbc0a5ff,
- 0x31f400cb,
- 0x220ef402,
-/* 0x01f4: cmp_c0 */
- 0xf00c1bf4,
- 0xcbbb10c7,
- 0x160ef400,
-/* 0x0200: cmp_c1 */
- 0xf406a430,
- 0xc7f00c18,
- 0x00cbbb14,
-/* 0x020f: cmp_zero */
- 0xf1070ef4,
-/* 0x0213: bpc_next */
- 0x380080c7,
- 0x80b601c8,
- 0x01b0b601,
- 0xf404b5b8,
- 0x90b6c308,
- 0x0497b801,
- 0xfdb208f4,
- 0x06800065,
- 0x1d08980e,
- 0xf40068fd,
- 0x64bd0502,
-/* 0x023c: dst_xcnt */
- 0x800075fd,
- 0x78fd1907,
- 0x1057f100,
- 0x0654b608,
- 0xd00056d0,
- 0x50b74057,
- 0x06980800,
- 0x0162b619,
- 0x980864b6,
- 0x72b60e07,
- 0x0567fd01,
- 0xb70056d0,
- 0xb4010050,
- 0x56d00060,
- 0x0160b400,
- 0xb44056d0,
- 0x56d00260,
- 0x0360b480,
- 0xb7c056d0,
- 0x98040050,
- 0x56d01b06,
- 0x1c069800,
- 0xf44056d0,
- 0x00f81030,
-/* 0x029c: cmd_exec_set_surface_tiled */
- 0xc7075798,
- 0x78c76879,
- 0x0380b664,
- 0xb06077c7,
- 0x1bf40e76,
- 0x0477f009,
-/* 0x02b7: xtile64 */
- 0xf00f0ef4,
- 0x70b6027c,
- 0x0947fd11,
-/* 0x02c3: xtileok */
- 0x980677f0,
- 0x5b980c5a,
- 0x00abfd0e,
- 0xbb01b7f0,
- 0xb2b604b7,
- 0xc4abff01,
- 0x9805a7bb,
- 0xe7f00d5d,
- 0x04e8bb01,
- 0xff01e2b6,
- 0xd8bbb4de,
- 0x01e0b605,
- 0xbb0cef94,
- 0xfefd02eb,
- 0x026cf005,
- 0x020860b7,
- 0xd00864b6,
- 0xb7bb006f,
- 0x00cbbb04,
- 0x98085f98,
- 0xfbfd0e5b,
- 0x01b7f000,
- 0xb604b7bb,
- 0xfbbb01b2,
- 0x05f7bb00,
- 0x5f98f0f9,
- 0x01b7f009,
- 0xb604b8bb,
- 0xfbbb01b2,
- 0x05f8bb00,
- 0x78bbf0f9,
- 0x0282b600,
- 0xbb01b7f0,
- 0xb9bb04b8,
- 0x0b589804,
- 0xbb01e7f0,
- 0xe2b604e9,
- 0xf48eff01,
- 0xbb04f7bb,
- 0x79bb00cf,
- 0x0589bb00,
- 0x90fcf0fc,
- 0xbb00d9fd,
- 0x89fd00ad,
- 0x008ffd00,
- 0xbb00a8bb,
- 0x92b604a7,
- 0x0497bb01,
- 0x988069d0,
- 0x58980557,
- 0x00acbb04,
- 0xb6007abb,
- 0x84b60081,
- 0x058bfd10,
- 0x060062b7,
- 0xb70067d0,
- 0xd0040060,
- 0x00f80068,
-/* 0x03a8: cmd_exec_set_surface_linear */
- 0xb7026cf0,
- 0xb6020260,
- 0x57980864,
- 0x0067d005,
- 0x040060b7,
- 0xb6045798,
- 0x67d01074,
- 0x0060b700,
- 0x06579804,
- 0xf80067d0,
-/* 0x03d1: cmd_exec_wait */
- 0xf900f900,
- 0x0007f110,
- 0x0604b608,
-/* 0x03dc: loop */
- 0xf00001cf,
- 0x1bf40114,
- 0xfc10fcfa,
-/* 0x03eb: cmd_exec_query */
- 0xc800f800,
- 0x1bf40d34,
- 0xd121f570,
- 0x0c47f103,
- 0x0644b608,
- 0xb6020598,
- 0x45d00450,
- 0x4040d000,
- 0xd00c57f0,
- 0x40b78045,
- 0x05980400,
- 0x1054b601,
- 0xb70045d0,
- 0xf1050040,
- 0xf00b0057,
- 0x45d00153,
- 0x4057f100,
- 0x0154b640,
- 0x808053f1,
- 0xf14045d0,
- 0xf1111057,
- 0xd0131253,
- 0x57f18045,
- 0x53f11514,
- 0x45d01716,
- 0x0157f1c0,
- 0x0153f026,
- 0x080047f1,
- 0xd00644b6,
-/* 0x045e: query_counter */
- 0x21f50045,
- 0x47f103d1,
- 0x44b6080c,
- 0x02059806,
- 0xd00045d0,
- 0x57f04040,
- 0x8045d004,
- 0x040040b7,
- 0xb6010598,
- 0x45d01054,
- 0x0040b700,
- 0x0057f105,
- 0x0045d003,
- 0x111057f1,
- 0x131253f1,
- 0x984045d0,
- 0x40b70305,
- 0x45d00500,
- 0x0157f100,
- 0x0153f026,
- 0x080047f1,
- 0xd00644b6,
- 0x00f80045,
-/* 0x04b8: cmd_exec */
- 0x03d121f5,
- 0xf4003fc8,
- 0x21f50e0b,
- 0x47f101af,
- 0x0ef40200,
-/* 0x04cd: cmd_exec_no_format */
- 0x1067f11e,
- 0x0664b608,
- 0x800177f0,
- 0x07800e07,
- 0x1d079819,
- 0xd00067d0,
- 0x44bd4067,
-/* 0x04e8: cmd_exec_init_src_surface */
- 0xbd0232f4,
- 0x043fc854,
- 0xf50a0bf4,
- 0xf403a821,
-/* 0x04fa: src_tiled */
- 0x21f50a0e,
- 0x49f0029c,
-/* 0x0501: cmd_exec_init_dst_surface */
- 0x0231f407,
- 0xc82c57f0,
- 0x0bf4083f,
- 0xa821f50a,
- 0x0a0ef403,
-/* 0x0514: dst_tiled */
- 0x029c21f5,
-/* 0x051b: cmd_exec_kick */
- 0xf10849f0,
- 0xb6080057,
- 0x06980654,
- 0x4056d01e,
- 0xf14167f0,
- 0xfd440063,
- 0x54d00546,
- 0x0c3fc800,
- 0xf5070bf4,
-/* 0x053f: cmd_exec_done */
- 0xf803eb21,
-/* 0x0541: cmd_wrcache_flush */
- 0x0027f100,
- 0xf034bd22,
- 0x23d00133,
- 0x0000f800,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
+++ /dev/null
-/*
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
-#include "nouveau_drm.h"
-
-struct nvc0_fb_priv {
- struct page *r100c10_page;
- dma_addr_t r100c10;
-};
-
-static inline void
-nvc0_mfb_subp_isr(struct drm_device *dev, int unit, int subp)
-{
- u32 subp_base = 0x141000 + (unit * 0x2000) + (subp * 0x400);
- u32 stat = nv_rd32(dev, subp_base + 0x020);
-
- if (stat) {
- NV_INFO(dev, "PMFB%d_SUBP%d: 0x%08x\n", unit, subp, stat);
- nv_wr32(dev, subp_base + 0x020, stat);
- }
-}
-
-static void
-nvc0_mfb_isr(struct drm_device *dev)
-{
- u32 units = nv_rd32(dev, 0x00017c);
- while (units) {
- u32 subp, unit = ffs(units) - 1;
- for (subp = 0; subp < 2; subp++)
- nvc0_mfb_subp_isr(dev, unit, subp);
- units &= ~(1 << unit);
- }
-
- /* we do something horribly wrong and upset PMFB a lot, so mask off
- * interrupts from it after the first one until it's fixed
- */
- nv_mask(dev, 0x000640, 0x02000000, 0x00000000);
-}
-
-static void
-nvc0_fb_destroy(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nvc0_fb_priv *priv = pfb->priv;
-
- nouveau_irq_unregister(dev, 25);
-
- if (priv->r100c10_page) {
- pci_unmap_page(dev->pdev, priv->r100c10, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- __free_page(priv->r100c10_page);
- }
-
- kfree(priv);
- pfb->priv = NULL;
-}
-
-static int
-nvc0_fb_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nvc0_fb_priv *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- pfb->priv = priv;
-
- priv->r100c10_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
- if (!priv->r100c10_page) {
- nvc0_fb_destroy(dev);
- return -ENOMEM;
- }
-
- priv->r100c10 = pci_map_page(dev->pdev, priv->r100c10_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, priv->r100c10)) {
- nvc0_fb_destroy(dev);
- return -EFAULT;
- }
-
- nouveau_irq_register(dev, 25, nvc0_mfb_isr);
- return 0;
-}
-
-int
-nvc0_fb_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fb_priv *priv;
- int ret;
-
- if (!dev_priv->engine.fb.priv) {
- ret = nvc0_fb_create(dev);
- if (ret)
- return ret;
- }
- priv = dev_priv->engine.fb.priv;
-
- nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
- nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */
- return 0;
-}
-
-void
-nvc0_fb_takedown(struct drm_device *dev)
-{
- nvc0_fb_destroy(dev);
-}
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_fbcon.h"
-#include "nouveau_mm.h"
+#include <core/mm.h>
int
nvc0_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_fifo.h"
-#include "nouveau_ramht.h"
+#include <engine/fifo.h>
+#include <core/ramht.h>
#include "nouveau_fence.h"
struct nvc0_fence_priv {
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_fifo.h"
-
-static void nvc0_fifo_isr(struct drm_device *);
-
-struct nvc0_fifo_priv {
- struct nouveau_fifo_priv base;
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
- struct nouveau_vma user_vma;
- int spoon_nr;
-};
-
-struct nvc0_fifo_chan {
- struct nouveau_fifo_chan base;
- struct nouveau_gpuobj *user;
-};
-
-static void
-nvc0_fifo_playlist_update(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct nouveau_gpuobj *cur;
- int i, p;
-
- cur = priv->playlist[priv->cur_playlist];
- priv->cur_playlist = !priv->cur_playlist;
-
- for (i = 0, p = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x3004 + (i * 8)) & 1))
- continue;
- nv_wo32(cur, p + 0, i);
- nv_wo32(cur, p + 4, 0x00000004);
- p += 8;
- }
- pinstmem->flush(dev);
-
- nv_wr32(dev, 0x002270, cur->vinst >> 12);
- nv_wr32(dev, 0x002274, 0x01f00000 | (p >> 3));
- if (!nv_wait(dev, 0x00227c, 0x00100000, 0x00000000))
- NV_ERROR(dev, "PFIFO - playlist update failed\n");
-}
-
-static int
-nvc0_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
- struct nvc0_fifo_chan *fctx;
- u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
- int ret, i;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
- priv->user_vma.offset + (chan->id * 0x1000),
- PAGE_SIZE);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* allocate vram for control regs, map into polling area */
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &fctx->user);
- if (ret)
- goto error;
-
- nouveau_vm_map_at(&priv->user_vma, chan->id * 0x1000,
- *(struct nouveau_mem **)fctx->user->node);
-
- for (i = 0; i < 0x100; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
- nv_wo32(chan->ramin, 0x08, lower_32_bits(fctx->user->vinst));
- nv_wo32(chan->ramin, 0x0c, upper_32_bits(fctx->user->vinst));
- nv_wo32(chan->ramin, 0x10, 0x0000face);
- nv_wo32(chan->ramin, 0x30, 0xfffff902);
- nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
- nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
- upper_32_bits(ib_virt));
- nv_wo32(chan->ramin, 0x54, 0x00000002);
- nv_wo32(chan->ramin, 0x84, 0x20400000);
- nv_wo32(chan->ramin, 0x94, 0x30000001);
- nv_wo32(chan->ramin, 0x9c, 0x00000100);
- nv_wo32(chan->ramin, 0xa4, 0x1f1f1f1f);
- nv_wo32(chan->ramin, 0xa8, 0x1f1f1f1f);
- nv_wo32(chan->ramin, 0xac, 0x0000001f);
- nv_wo32(chan->ramin, 0xb8, 0xf8000000);
- nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
- nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
- pinstmem->flush(dev);
-
- nv_wr32(dev, 0x003000 + (chan->id * 8), 0xc0000000 |
- (chan->ramin->vinst >> 12));
- nv_wr32(dev, 0x003004 + (chan->id * 8), 0x001f0001);
- nvc0_fifo_playlist_update(dev);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-static void
-nvc0_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nvc0_fifo_chan *fctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
-
- nv_mask(dev, 0x003004 + (chan->id * 8), 0x00000001, 0x00000000);
- nv_wr32(dev, 0x002634, chan->id);
- if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
- NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
- nvc0_fifo_playlist_update(dev);
- nv_wr32(dev, 0x003000 + (chan->id * 8), 0x00000000);
-
- nouveau_gpuobj_ref(NULL, &fctx->user);
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
-
- chan->engctx[engine] = NULL;
- kfree(fctx);
-}
-
-static int
-nvc0_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fifo_priv *priv = nv_engine(dev, engine);
- struct nouveau_channel *chan;
- int i;
-
- /* reset PFIFO, enable all available PSUBFIFO areas */
- nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(dev, 0x000204, 0xffffffff);
- nv_wr32(dev, 0x002204, 0xffffffff);
-
- priv->spoon_nr = hweight32(nv_rd32(dev, 0x002204));
- NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
-
- /* assign engines to subfifos */
- if (priv->spoon_nr >= 3) {
- nv_wr32(dev, 0x002208, ~(1 << 0)); /* PGRAPH */
- nv_wr32(dev, 0x00220c, ~(1 << 1)); /* PVP */
- nv_wr32(dev, 0x002210, ~(1 << 1)); /* PPP */
- nv_wr32(dev, 0x002214, ~(1 << 1)); /* PBSP */
- nv_wr32(dev, 0x002218, ~(1 << 2)); /* PCE0 */
- nv_wr32(dev, 0x00221c, ~(1 << 1)); /* PCE1 */
- }
-
- /* PSUBFIFO[n] */
- for (i = 0; i < priv->spoon_nr; i++) {
- nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
- nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
- nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
- }
-
- nv_mask(dev, 0x002200, 0x00000001, 0x00000001);
- nv_wr32(dev, 0x002254, 0x10000000 | priv->user_vma.offset >> 12);
-
- nv_wr32(dev, 0x002a00, 0xffffffff); /* clears PFIFO.INTR bit 30 */
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xbfffffff);
-
- /* restore PFIFO context table */
- for (i = 0; i < 128; i++) {
- chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->engctx[engine])
- continue;
-
- nv_wr32(dev, 0x003000 + (i * 8), 0xc0000000 |
- (chan->ramin->vinst >> 12));
- nv_wr32(dev, 0x003004 + (i * 8), 0x001f0001);
- }
- nvc0_fifo_playlist_update(dev);
-
- return 0;
-}
-
-static int
-nvc0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
- int i;
-
- for (i = 0; i < 128; i++) {
- if (!(nv_rd32(dev, 0x003004 + (i * 8)) & 1))
- continue;
-
- nv_mask(dev, 0x003004 + (i * 8), 0x00000001, 0x00000000);
- nv_wr32(dev, 0x002634, i);
- if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
- NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
- i, nv_rd32(dev, 0x002634));
- return -EBUSY;
- }
- }
-
- nv_wr32(dev, 0x002140, 0x00000000);
- return 0;
-}
-
-
-struct nouveau_enum nvc0_fifo_fault_unit[] = {
- { 0x00, "PGRAPH" },
- { 0x03, "PEEPHOLE" },
- { 0x04, "BAR1" },
- { 0x05, "BAR3" },
- { 0x07, "PFIFO" },
- { 0x10, "PBSP" },
- { 0x11, "PPPP" },
- { 0x13, "PCOUNTER" },
- { 0x14, "PVP" },
- { 0x15, "PCOPY0" },
- { 0x16, "PCOPY1" },
- { 0x17, "PDAEMON" },
- {}
-};
-
-struct nouveau_enum nvc0_fifo_fault_reason[] = {
- { 0x00, "PT_NOT_PRESENT" },
- { 0x01, "PT_TOO_SHORT" },
- { 0x02, "PAGE_NOT_PRESENT" },
- { 0x03, "VM_LIMIT_EXCEEDED" },
- { 0x04, "NO_CHANNEL" },
- { 0x05, "PAGE_SYSTEM_ONLY" },
- { 0x06, "PAGE_READ_ONLY" },
- { 0x0a, "COMPRESSED_SYSRAM" },
- { 0x0c, "INVALID_STORAGE_TYPE" },
- {}
-};
-
-struct nouveau_enum nvc0_fifo_fault_hubclient[] = {
- { 0x01, "PCOPY0" },
- { 0x02, "PCOPY1" },
- { 0x04, "DISPATCH" },
- { 0x05, "CTXCTL" },
- { 0x06, "PFIFO" },
- { 0x07, "BAR_READ" },
- { 0x08, "BAR_WRITE" },
- { 0x0b, "PVP" },
- { 0x0c, "PPPP" },
- { 0x0d, "PBSP" },
- { 0x11, "PCOUNTER" },
- { 0x12, "PDAEMON" },
- { 0x14, "CCACHE" },
- { 0x15, "CCACHE_POST" },
- {}
-};
-
-struct nouveau_enum nvc0_fifo_fault_gpcclient[] = {
- { 0x01, "TEX" },
- { 0x0c, "ESETUP" },
- { 0x0e, "CTXCTL" },
- { 0x0f, "PROP" },
- {}
-};
-
-struct nouveau_bitfield nvc0_fifo_subfifo_intr[] = {
-/* { 0x00008000, "" } seen with null ib push */
- { 0x00200000, "ILLEGAL_MTHD" },
- { 0x00800000, "EMPTY_SUBC" },
- {}
-};
-
-static void
-nvc0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
-{
- u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
- u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
- u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
- u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
- u32 client = (stat & 0x00001f00) >> 8;
-
- NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
- (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
- nouveau_enum_print(nvc0_fifo_fault_reason, stat & 0x0000000f);
- printk("] from ");
- nouveau_enum_print(nvc0_fifo_fault_unit, unit);
- if (stat & 0x00000040) {
- printk("/");
- nouveau_enum_print(nvc0_fifo_fault_hubclient, client);
- } else {
- printk("/GPC%d/", (stat & 0x1f000000) >> 24);
- nouveau_enum_print(nvc0_fifo_fault_gpcclient, client);
- }
- printk(" on channel 0x%010llx\n", (u64)inst << 12);
-}
-
-static int
-nvc0_fifo_page_flip(struct drm_device *dev, u32 chid)
-{
- struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
- unsigned long flags;
- int ret = -EINVAL;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < priv->base.channels)) {
- chan = dev_priv->channels.ptr[chid];
- if (likely(chan))
- ret = nouveau_finish_page_flip(chan, NULL);
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return ret;
-}
-
-static void
-nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
-{
- u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
- u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
- u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
- u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
- u32 subc = (addr & 0x00070000);
- u32 mthd = (addr & 0x00003ffc);
- u32 show = stat;
-
- if (stat & 0x00200000) {
- if (mthd == 0x0054) {
- if (!nvc0_fifo_page_flip(dev, chid))
- show &= ~0x00200000;
- }
- }
-
- if (show) {
- NV_INFO(dev, "PFIFO%d:", unit);
- nouveau_bitfield_print(nvc0_fifo_subfifo_intr, show);
- NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
- unit, chid, subc, mthd, data);
- }
-
- nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
- nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
-}
-
-static void
-nvc0_fifo_isr(struct drm_device *dev)
-{
- u32 mask = nv_rd32(dev, 0x002140);
- u32 stat = nv_rd32(dev, 0x002100) & mask;
-
- if (stat & 0x00000100) {
- NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
- nv_wr32(dev, 0x002100, 0x00000100);
- stat &= ~0x00000100;
- }
-
- if (stat & 0x10000000) {
- u32 units = nv_rd32(dev, 0x00259c);
- u32 u = units;
-
- while (u) {
- int i = ffs(u) - 1;
- nvc0_fifo_isr_vm_fault(dev, i);
- u &= ~(1 << i);
- }
-
- nv_wr32(dev, 0x00259c, units);
- stat &= ~0x10000000;
- }
-
- if (stat & 0x20000000) {
- u32 units = nv_rd32(dev, 0x0025a0);
- u32 u = units;
-
- while (u) {
- int i = ffs(u) - 1;
- nvc0_fifo_isr_subfifo_intr(dev, i);
- u &= ~(1 << i);
- }
-
- nv_wr32(dev, 0x0025a0, units);
- stat &= ~0x20000000;
- }
-
- if (stat & 0x40000000) {
- NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
- nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
- stat &= ~0x40000000;
- }
-
- if (stat) {
- NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
- nv_wr32(dev, 0x002100, stat);
- nv_wr32(dev, 0x002140, 0);
- }
-}
-
-static void
-nvc0_fifo_destroy(struct drm_device *dev, int engine)
-{
- struct nvc0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- nouveau_vm_put(&priv->user_vma);
- nouveau_gpuobj_ref(NULL, &priv->playlist[1]);
- nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
-
- dev_priv->eng[engine] = NULL;
- kfree(priv);
-}
-
-int
-nvc0_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_fifo_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nvc0_fifo_destroy;
- priv->base.base.init = nvc0_fifo_init;
- priv->base.base.fini = nvc0_fifo_fini;
- priv->base.base.context_new = nvc0_fifo_context_new;
- priv->base.base.context_del = nvc0_fifo_context_del;
- priv->base.channels = 128;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[0]);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, 4096, 4096, 0, &priv->playlist[1]);
- if (ret)
- goto error;
-
- ret = nouveau_vm_get(dev_priv->bar1_vm, priv->base.channels * 0x1000,
- 12, NV_MEM_ACCESS_RW, &priv->user_vma);
- if (ret)
- goto error;
-
- nouveau_irq_register(dev, 8, nvc0_fifo_isr);
-error:
- if (ret)
- priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
- return ret;
-}
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include <linux/module.h>
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_fifo.h"
-
-#include "nvc0_graph.h"
-#include "nvc0_grhub.fuc.h"
-#include "nvc0_grgpc.fuc.h"
-
-static void
-nvc0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
-{
- NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
- nv_rd32(dev, base + 0x400));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
- nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
- nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
-}
-
-static void
-nvc0_graph_ctxctl_debug(struct drm_device *dev)
-{
- u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
- u32 gpc;
-
- nvc0_graph_ctxctl_debug_unit(dev, 0x409000);
- for (gpc = 0; gpc < gpcnr; gpc++)
- nvc0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
-}
-
-static int
-nvc0_graph_load_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
-
- nv_wr32(dev, 0x409840, 0x00000030);
- nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
- nv_wr32(dev, 0x409504, 0x00000003);
- if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
- NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
-
- return 0;
-}
-
-static int
-nvc0_graph_unload_context_to(struct drm_device *dev, u64 chan)
-{
- nv_wr32(dev, 0x409840, 0x00000003);
- nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
- nv_wr32(dev, 0x409504, 0x00000009);
- if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-nvc0_graph_construct_context(struct nouveau_channel *chan)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
- struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- int ret, i;
- u32 *ctx;
-
- ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- if (!nouveau_ctxfw) {
- nv_wr32(dev, 0x409840, 0x80000000);
- nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
- nv_wr32(dev, 0x409504, 0x00000001);
- if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
- NV_ERROR(dev, "PGRAPH: HUB_SET_CHAN timeout\n");
- nvc0_graph_ctxctl_debug(dev);
- ret = -EBUSY;
- goto err;
- }
- } else {
- nvc0_graph_load_context(chan);
-
- nv_wo32(grch->grctx, 0x1c, 1);
- nv_wo32(grch->grctx, 0x20, 0);
- nv_wo32(grch->grctx, 0x28, 0);
- nv_wo32(grch->grctx, 0x2c, 0);
- dev_priv->engine.instmem.flush(dev);
- }
-
- ret = nvc0_grctx_generate(chan);
- if (ret)
- goto err;
-
- if (!nouveau_ctxfw) {
- nv_wr32(dev, 0x409840, 0x80000000);
- nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
- nv_wr32(dev, 0x409504, 0x00000002);
- if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
- NV_ERROR(dev, "PGRAPH: HUB_CTX_SAVE timeout\n");
- nvc0_graph_ctxctl_debug(dev);
- ret = -EBUSY;
- goto err;
- }
- } else {
- ret = nvc0_graph_unload_context_to(dev, chan->ramin->vinst);
- if (ret)
- goto err;
- }
-
- for (i = 0; i < priv->grctx_size; i += 4)
- ctx[i / 4] = nv_ro32(grch->grctx, i);
-
- priv->grctx_vals = ctx;
- return 0;
-
-err:
- kfree(ctx);
- return ret;
-}
-
-static int
-nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
-{
- struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
- struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i = 0, gpc, tp, ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
- &grch->unk408004);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
- &grch->unk40800c);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
- NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
- &grch->unk418810);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
- &grch->mmio);
- if (ret)
- return ret;
-
-
- nv_wo32(grch->mmio, i++ * 4, 0x00408004);
- nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
- nv_wo32(grch->mmio, i++ * 4, 0x00408008);
- nv_wo32(grch->mmio, i++ * 4, 0x80000018);
-
- nv_wo32(grch->mmio, i++ * 4, 0x0040800c);
- nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
- nv_wo32(grch->mmio, i++ * 4, 0x00408010);
- nv_wo32(grch->mmio, i++ * 4, 0x80000000);
-
- nv_wo32(grch->mmio, i++ * 4, 0x00418810);
- nv_wo32(grch->mmio, i++ * 4, 0x80000000 | grch->unk418810->linst >> 12);
- nv_wo32(grch->mmio, i++ * 4, 0x00419848);
- nv_wo32(grch->mmio, i++ * 4, 0x10000000 | grch->unk418810->linst >> 12);
-
- nv_wo32(grch->mmio, i++ * 4, 0x00419004);
- nv_wo32(grch->mmio, i++ * 4, grch->unk40800c->linst >> 8);
- nv_wo32(grch->mmio, i++ * 4, 0x00419008);
- nv_wo32(grch->mmio, i++ * 4, 0x00000000);
-
- nv_wo32(grch->mmio, i++ * 4, 0x00418808);
- nv_wo32(grch->mmio, i++ * 4, grch->unk408004->linst >> 8);
- nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
- nv_wo32(grch->mmio, i++ * 4, 0x80000018);
-
- if (dev_priv->chipset != 0xc1) {
- u32 magic = 0x02180000;
- nv_wo32(grch->mmio, i++ * 4, 0x00405830);
- nv_wo32(grch->mmio, i++ * 4, magic);
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
- u32 reg = TP_UNIT(gpc, tp, 0x520);
- nv_wo32(grch->mmio, i++ * 4, reg);
- nv_wo32(grch->mmio, i++ * 4, magic);
- magic += 0x0324;
- }
- }
- } else {
- u32 magic = 0x02180000;
- nv_wo32(grch->mmio, i++ * 4, 0x00405830);
- nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
- nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
- nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
- u32 reg = TP_UNIT(gpc, tp, 0x520);
- nv_wo32(grch->mmio, i++ * 4, reg);
- nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
- magic += 0x0324;
- }
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
- u32 reg = TP_UNIT(gpc, tp, 0x544);
- nv_wo32(grch->mmio, i++ * 4, reg);
- nv_wo32(grch->mmio, i++ * 4, magic);
- magic += 0x0324;
- }
- }
- }
-
- grch->mmio_nr = i / 2;
- return 0;
-}
-
-static int
-nvc0_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nvc0_graph_priv *priv = nv_engine(dev, engine);
- struct nvc0_graph_chan *grch;
- struct nouveau_gpuobj *grctx;
- int ret, i;
-
- grch = kzalloc(sizeof(*grch), GFP_KERNEL);
- if (!grch)
- return -ENOMEM;
- chan->engctx[NVOBJ_ENGINE_GR] = grch;
-
- ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
- NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
- &grch->grctx);
- if (ret)
- goto error;
- grctx = grch->grctx;
-
- ret = nvc0_graph_create_context_mmio_list(chan);
- if (ret)
- goto error;
-
- nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
- nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
- pinstmem->flush(dev);
-
- if (!priv->grctx_vals) {
- ret = nvc0_graph_construct_context(chan);
- if (ret)
- goto error;
- }
-
- for (i = 0; i < priv->grctx_size; i += 4)
- nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
-
- if (!nouveau_ctxfw) {
- nv_wo32(grctx, 0x00, grch->mmio_nr);
- nv_wo32(grctx, 0x04, grch->mmio->linst >> 8);
- } else {
- nv_wo32(grctx, 0xf4, 0);
- nv_wo32(grctx, 0xf8, 0);
- nv_wo32(grctx, 0x10, grch->mmio_nr);
- nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
- nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
- nv_wo32(grctx, 0x1c, 1);
- nv_wo32(grctx, 0x20, 0);
- nv_wo32(grctx, 0x28, 0);
- nv_wo32(grctx, 0x2c, 0);
- }
- pinstmem->flush(dev);
- return 0;
-
-error:
- priv->base.context_del(chan, engine);
- return ret;
-}
-
-static void
-nvc0_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nvc0_graph_chan *grch = chan->engctx[engine];
-
- nouveau_gpuobj_ref(NULL, &grch->mmio);
- nouveau_gpuobj_ref(NULL, &grch->unk418810);
- nouveau_gpuobj_ref(NULL, &grch->unk40800c);
- nouveau_gpuobj_ref(NULL, &grch->unk408004);
- nouveau_gpuobj_ref(NULL, &grch->grctx);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nvc0_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- return 0;
-}
-
-static int
-nvc0_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- return 0;
-}
-
-static void
-nvc0_graph_init_obj418880(struct drm_device *dev)
-{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- int i;
-
- nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
- nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
- for (i = 0; i < 4; i++)
- nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
- nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
- nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
-}
-
-static void
-nvc0_graph_init_regs(struct drm_device *dev)
-{
- nv_wr32(dev, 0x400080, 0x003083c2);
- nv_wr32(dev, 0x400088, 0x00006fe7);
- nv_wr32(dev, 0x40008c, 0x00000000);
- nv_wr32(dev, 0x400090, 0x00000030);
- nv_wr32(dev, 0x40013c, 0x013901f7);
- nv_wr32(dev, 0x400140, 0x00000100);
- nv_wr32(dev, 0x400144, 0x00000000);
- nv_wr32(dev, 0x400148, 0x00000110);
- nv_wr32(dev, 0x400138, 0x00000000);
- nv_wr32(dev, 0x400130, 0x00000000);
- nv_wr32(dev, 0x400134, 0x00000000);
- nv_wr32(dev, 0x400124, 0x00000002);
-}
-
-static void
-nvc0_graph_init_gpc_0(struct drm_device *dev)
-{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tp_total);
- u32 data[TP_MAX / 8];
- u8 tpnr[GPC_MAX];
- int i, gpc, tpc;
-
- nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
-
- /*
- * TP ROP UNKVAL(magic_not_rop_nr)
- * 450: 4/0/0/0 2 3
- * 460: 3/4/0/0 4 1
- * 465: 3/4/4/0 4 7
- * 470: 3/3/4/4 5 5
- * 480: 3/4/4/4 6 6
- */
-
- memset(data, 0x00, sizeof(data));
- memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
- for (i = 0, gpc = -1; i < priv->tp_total; i++) {
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpnr[gpc]);
- tpc = priv->tp_nr[gpc] - tpnr[gpc]--;
-
- data[i / 8] |= tpc << ((i % 8) * 4);
- }
-
- nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
- nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
- nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
- nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
- priv->tp_nr[gpc]);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tp_total);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
- }
-
- nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
- nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
-}
-
-static void
-nvc0_graph_init_units(struct drm_device *dev)
-{
- nv_wr32(dev, 0x409c24, 0x000f0000);
- nv_wr32(dev, 0x404000, 0xc0000000); /* DISPATCH */
- nv_wr32(dev, 0x404600, 0xc0000000); /* M2MF */
- nv_wr32(dev, 0x408030, 0xc0000000);
- nv_wr32(dev, 0x40601c, 0xc0000000);
- nv_wr32(dev, 0x404490, 0xc0000000); /* MACRO */
- nv_wr32(dev, 0x406018, 0xc0000000);
- nv_wr32(dev, 0x405840, 0xc0000000);
- nv_wr32(dev, 0x405844, 0x00ffffff);
- nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
- nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
-}
-
-static void
-nvc0_graph_init_gpc_1(struct drm_device *dev)
-{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- int gpc, tp;
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
- for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x508), 0xffffffff);
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x50c), 0xffffffff);
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x224), 0xc0000000);
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x48c), 0xc0000000);
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x084), 0xc0000000);
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x644), 0x001ffffe);
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x64c), 0x0000000f);
- }
- nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
- nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
- }
-}
-
-static void
-nvc0_graph_init_rop(struct drm_device *dev)
-{
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- int rop;
-
- for (rop = 0; rop < priv->rop_nr; rop++) {
- nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
- nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
- }
-}
-
-static void
-nvc0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
- struct nvc0_graph_fuc *code, struct nvc0_graph_fuc *data)
-{
- int i;
-
- nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
- for (i = 0; i < data->size / 4; i++)
- nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
-
- nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
- for (i = 0; i < code->size / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, fuc_base + 0x0188, i >> 6);
- nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
- }
-}
-
-static int
-nvc0_graph_init_ctxctl(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- u32 r000260;
- int i;
-
- if (!nouveau_ctxfw) {
- /* load HUB microcode */
- r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x4091c0, 0x01000000);
- for (i = 0; i < sizeof(nvc0_grhub_data) / 4; i++)
- nv_wr32(dev, 0x4091c4, nvc0_grhub_data[i]);
-
- nv_wr32(dev, 0x409180, 0x01000000);
- for (i = 0; i < sizeof(nvc0_grhub_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, 0x409188, i >> 6);
- nv_wr32(dev, 0x409184, nvc0_grhub_code[i]);
- }
-
- /* load GPC microcode */
- nv_wr32(dev, 0x41a1c0, 0x01000000);
- for (i = 0; i < sizeof(nvc0_grgpc_data) / 4; i++)
- nv_wr32(dev, 0x41a1c4, nvc0_grgpc_data[i]);
-
- nv_wr32(dev, 0x41a180, 0x01000000);
- for (i = 0; i < sizeof(nvc0_grgpc_code) / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, 0x41a188, i >> 6);
- nv_wr32(dev, 0x41a184, nvc0_grgpc_code[i]);
- }
- nv_wr32(dev, 0x000260, r000260);
-
- /* start HUB ucode running, it'll init the GPCs */
- nv_wr32(dev, 0x409800, dev_priv->chipset);
- nv_wr32(dev, 0x40910c, 0x00000000);
- nv_wr32(dev, 0x409100, 0x00000002);
- if (!nv_wait(dev, 0x409800, 0x80000000, 0x80000000)) {
- NV_ERROR(dev, "PGRAPH: HUB_INIT timed out\n");
- nvc0_graph_ctxctl_debug(dev);
- return -EBUSY;
- }
-
- priv->grctx_size = nv_rd32(dev, 0x409804);
- return 0;
- }
-
- /* load fuc microcode */
- r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
- nvc0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
- nvc0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
- nv_wr32(dev, 0x000260, r000260);
-
- /* start both of them running */
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x41a10c, 0x00000000);
- nv_wr32(dev, 0x40910c, 0x00000000);
- nv_wr32(dev, 0x41a100, 0x00000002);
- nv_wr32(dev, 0x409100, 0x00000002);
- if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
- NV_INFO(dev, "0x409800 wait failed\n");
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x7fffffff);
- nv_wr32(dev, 0x409504, 0x00000021);
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000010);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
- return -EBUSY;
- }
- priv->grctx_size = nv_rd32(dev, 0x409800);
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000016);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000025);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-nvc0_graph_init(struct drm_device *dev, int engine)
-{
- int ret;
-
- nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
- nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
-
- nvc0_graph_init_obj418880(dev);
- nvc0_graph_init_regs(dev);
- /*nvc0_graph_init_unitplemented_magics(dev);*/
- nvc0_graph_init_gpc_0(dev);
- /*nvc0_graph_init_unitplemented_c242(dev);*/
-
- nv_wr32(dev, 0x400500, 0x00010001);
- nv_wr32(dev, 0x400100, 0xffffffff);
- nv_wr32(dev, 0x40013c, 0xffffffff);
-
- nvc0_graph_init_units(dev);
- nvc0_graph_init_gpc_1(dev);
- nvc0_graph_init_rop(dev);
-
- nv_wr32(dev, 0x400108, 0xffffffff);
- nv_wr32(dev, 0x400138, 0xffffffff);
- nv_wr32(dev, 0x400118, 0xffffffff);
- nv_wr32(dev, 0x400130, 0xffffffff);
- nv_wr32(dev, 0x40011c, 0xffffffff);
- nv_wr32(dev, 0x400134, 0xffffffff);
- nv_wr32(dev, 0x400054, 0x34ce3464);
-
- ret = nvc0_graph_init_ctxctl(dev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int
-nvc0_graph_isr_chid(struct drm_device *dev, u64 inst)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->ramin)
- continue;
-
- if (inst == chan->ramin->vinst)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
-}
-
-static void
-nvc0_graph_ctxctl_isr(struct drm_device *dev)
-{
- u32 ustat = nv_rd32(dev, 0x409c18);
-
- if (ustat & 0x00000001)
- NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
- if (ustat & 0x00080000)
- NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
- if (ustat & ~0x00080001)
- NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
-
- nvc0_graph_ctxctl_debug(dev);
- nv_wr32(dev, 0x409c20, ustat);
-}
-
-static void
-nvc0_graph_isr(struct drm_device *dev)
-{
- u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
- u32 chid = nvc0_graph_isr_chid(dev, inst);
- u32 stat = nv_rd32(dev, 0x400100);
- u32 addr = nv_rd32(dev, 0x400704);
- u32 mthd = (addr & 0x00003ffc);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 data = nv_rd32(dev, 0x400708);
- u32 code = nv_rd32(dev, 0x400110);
- u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
-
- if (stat & 0x00000010) {
- if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- }
- nv_wr32(dev, 0x400100, 0x00000010);
- stat &= ~0x00000010;
- }
-
- if (stat & 0x00000020) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv_wr32(dev, 0x400100, 0x00000020);
- stat &= ~0x00000020;
- }
-
- if (stat & 0x00100000) {
- NV_INFO(dev, "PGRAPH: DATA_ERROR [");
- nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv_wr32(dev, 0x400100, 0x00100000);
- stat &= ~0x00100000;
- }
-
- if (stat & 0x00200000) {
- u32 trap = nv_rd32(dev, 0x400108);
- NV_INFO(dev, "PGRAPH: TRAP ch %d status 0x%08x\n", chid, trap);
- nv_wr32(dev, 0x400108, trap);
- nv_wr32(dev, 0x400100, 0x00200000);
- stat &= ~0x00200000;
- }
-
- if (stat & 0x00080000) {
- nvc0_graph_ctxctl_isr(dev);
- nv_wr32(dev, 0x400100, 0x00080000);
- stat &= ~0x00080000;
- }
-
- if (stat) {
- NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
- nv_wr32(dev, 0x400100, stat);
- }
-
- nv_wr32(dev, 0x400500, 0x00010001);
-}
-
-static int
-nvc0_graph_create_fw(struct drm_device *dev, const char *fwname,
- struct nvc0_graph_fuc *fuc)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- const struct firmware *fw;
- char f[32];
- int ret;
-
- snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
- ret = request_firmware(&fw, f, &dev->pdev->dev);
- if (ret) {
- snprintf(f, sizeof(f), "nouveau/%s", fwname);
- ret = request_firmware(&fw, f, &dev->pdev->dev);
- if (ret) {
- NV_ERROR(dev, "failed to load %s\n", fwname);
- return ret;
- }
- }
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- release_firmware(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-static void
-nvc0_graph_destroy_fw(struct nvc0_graph_fuc *fuc)
-{
- if (fuc->data) {
- kfree(fuc->data);
- fuc->data = NULL;
- }
-}
-
-static void
-nvc0_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nvc0_graph_priv *priv = nv_engine(dev, engine);
-
- if (nouveau_ctxfw) {
- nvc0_graph_destroy_fw(&priv->fuc409c);
- nvc0_graph_destroy_fw(&priv->fuc409d);
- nvc0_graph_destroy_fw(&priv->fuc41ac);
- nvc0_graph_destroy_fw(&priv->fuc41ad);
- }
-
- nouveau_irq_unregister(dev, 12);
-
- nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
- nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
-
- if (priv->grctx_vals)
- kfree(priv->grctx_vals);
-
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(priv);
-}
-
-int
-nvc0_graph_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_graph_priv *priv;
- int ret, gpc, i;
- u32 fermi;
-
- fermi = nvc0_graph_class(dev);
- if (!fermi) {
- NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
- return 0;
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.destroy = nvc0_graph_destroy;
- priv->base.init = nvc0_graph_init;
- priv->base.fini = nvc0_graph_fini;
- priv->base.context_new = nvc0_graph_context_new;
- priv->base.context_del = nvc0_graph_context_del;
- priv->base.object_new = nvc0_graph_object_new;
-
- NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
- nouveau_irq_register(dev, 12, nvc0_graph_isr);
-
- if (nouveau_ctxfw) {
- NV_INFO(dev, "PGRAPH: using external firmware\n");
- if (nvc0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
- nvc0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
- nvc0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
- nvc0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
- ret = 0;
- goto error;
- }
- }
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
- if (ret)
- goto error;
-
- for (i = 0; i < 0x1000; i += 4) {
- nv_wo32(priv->unk4188b4, i, 0x00000010);
- nv_wo32(priv->unk4188b8, i, 0x00000010);
- }
-
- priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
- priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- priv->tp_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
- priv->tp_total += priv->tp_nr[gpc];
- }
-
- /*XXX: these need figuring out... */
- switch (dev_priv->chipset) {
- case 0xc0:
- if (priv->tp_total == 11) { /* 465, 3/4/4/0, 4 */
- priv->magic_not_rop_nr = 0x07;
- } else
- if (priv->tp_total == 14) { /* 470, 3/3/4/4, 5 */
- priv->magic_not_rop_nr = 0x05;
- } else
- if (priv->tp_total == 15) { /* 480, 3/4/4/4, 6 */
- priv->magic_not_rop_nr = 0x06;
- }
- break;
- case 0xc3: /* 450, 4/0/0/0, 2 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xc4: /* 460, 3/4/0/0, 4 */
- priv->magic_not_rop_nr = 0x01;
- break;
- case 0xc1: /* 2/0/0/0, 1 */
- priv->magic_not_rop_nr = 0x01;
- break;
- case 0xc8: /* 4/4/3/4, 5 */
- priv->magic_not_rop_nr = 0x06;
- break;
- case 0xce: /* 4/4/0/0, 4 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xcf: /* 4/0/0/0, 3 */
- priv->magic_not_rop_nr = 0x03;
- break;
- case 0xd9: /* 1/0/0/0, 1 */
- priv->magic_not_rop_nr = 0x01;
- break;
- }
-
- if (!priv->magic_not_rop_nr) {
- NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
- priv->tp_nr[0], priv->tp_nr[1], priv->tp_nr[2],
- priv->tp_nr[3], priv->rop_nr);
- priv->magic_not_rop_nr = 0x00;
- }
-
- NVOBJ_CLASS(dev, 0x902d, GR); /* 2D */
- NVOBJ_CLASS(dev, 0x9039, GR); /* M2MF */
- NVOBJ_CLASS(dev, 0x9097, GR); /* 3D */
- if (fermi >= 0x9197)
- NVOBJ_CLASS(dev, 0x9197, GR); /* 3D (NVC1-) */
- if (fermi >= 0x9297)
- NVOBJ_CLASS(dev, 0x9297, GR); /* 3D (NVC8-) */
- NVOBJ_CLASS(dev, 0x90c0, GR); /* COMPUTE */
- return 0;
-
-error:
- nvc0_graph_destroy(dev, NVOBJ_ENGINE_GR);
- return ret;
-}
+++ /dev/null
-/* fuc microcode util functions for nvc0 PGRAPH
- *
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-define(`mmctx_data', `.b32 eval((($2 - 1) << 26) | $1)')
-define(`queue_init', `.skip eval((2 * 4) + ((8 * 4) * 2))')
-
-ifdef(`include_code', `
-// Error codes
-define(`E_BAD_COMMAND', 0x01)
-define(`E_CMD_OVERFLOW', 0x02)
-
-// Util macros to help with debugging ucode hangs etc
-define(`T_WAIT', 0)
-define(`T_MMCTX', 1)
-define(`T_STRWAIT', 2)
-define(`T_STRINIT', 3)
-define(`T_AUTO', 4)
-define(`T_CHAN', 5)
-define(`T_LOAD', 6)
-define(`T_SAVE', 7)
-define(`T_LCHAN', 8)
-define(`T_LCTXH', 9)
-
-define(`trace_set', `
- mov $r8 0x83c
- shl b32 $r8 6
- clear b32 $r9
- bset $r9 $1
- iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
-')
-
-define(`trace_clr', `
- mov $r8 0x85c
- shl b32 $r8 6
- clear b32 $r9
- bset $r9 $1
- iowr I[$r8 + 0x000] $r9 // CC_SCRATCH[7]
-')
-
-// queue_put - add request to queue
-//
-// In : $r13 queue pointer
-// $r14 command
-// $r15 data
-//
-queue_put:
- // make sure we have space..
- ld b32 $r8 D[$r13 + 0x0] // GET
- ld b32 $r9 D[$r13 + 0x4] // PUT
- xor $r8 8
- cmpu b32 $r8 $r9
- bra ne #queue_put_next
- mov $r15 E_CMD_OVERFLOW
- call #error
- ret
-
- // store cmd/data on queue
- queue_put_next:
- and $r8 $r9 7
- shl b32 $r8 3
- add b32 $r8 $r13
- add b32 $r8 8
- st b32 D[$r8 + 0x0] $r14
- st b32 D[$r8 + 0x4] $r15
-
- // update PUT
- add b32 $r9 1
- and $r9 0xf
- st b32 D[$r13 + 0x4] $r9
- ret
-
-// queue_get - fetch request from queue
-//
-// In : $r13 queue pointer
-//
-// Out: $p1 clear on success (data available)
-// $r14 command
-// $r15 data
-//
-queue_get:
- bset $flags $p1
- ld b32 $r8 D[$r13 + 0x0] // GET
- ld b32 $r9 D[$r13 + 0x4] // PUT
- cmpu b32 $r8 $r9
- bra e #queue_get_done
- // fetch first cmd/data pair
- and $r9 $r8 7
- shl b32 $r9 3
- add b32 $r9 $r13
- add b32 $r9 8
- ld b32 $r14 D[$r9 + 0x0]
- ld b32 $r15 D[$r9 + 0x4]
-
- // update GET
- add b32 $r8 1
- and $r8 0xf
- st b32 D[$r13 + 0x0] $r8
- bclr $flags $p1
-queue_get_done:
- ret
-
-// nv_rd32 - read 32-bit value from nv register
-//
-// In : $r14 register
-// Out: $r15 value
-//
-nv_rd32:
- mov $r11 0x728
- shl b32 $r11 6
- mov b32 $r12 $r14
- bset $r12 31 // MMIO_CTRL_PENDING
- iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
- nv_rd32_wait:
- iord $r12 I[$r11 + 0x000]
- xbit $r12 $r12 31
- bra ne #nv_rd32_wait
- mov $r10 6 // DONE_MMIO_RD
- call #wait_doneo
- iord $r15 I[$r11 + 0x100] // MMIO_RDVAL
- ret
-
-// nv_wr32 - write 32-bit value to nv register
-//
-// In : $r14 register
-// $r15 value
-//
-nv_wr32:
- mov $r11 0x728
- shl b32 $r11 6
- iowr I[$r11 + 0x200] $r15 // MMIO_WRVAL
- mov b32 $r12 $r14
- bset $r12 31 // MMIO_CTRL_PENDING
- bset $r12 30 // MMIO_CTRL_WRITE
- iowr I[$r11 + 0x000] $r12 // MMIO_CTRL
- nv_wr32_wait:
- iord $r12 I[$r11 + 0x000]
- xbit $r12 $r12 31
- bra ne #nv_wr32_wait
- ret
-
-// (re)set watchdog timer
-//
-// In : $r15 timeout
-//
-watchdog_reset:
- mov $r8 0x430
- shl b32 $r8 6
- bset $r15 31
- iowr I[$r8 + 0x000] $r15
- ret
-
-// clear watchdog timer
-watchdog_clear:
- mov $r8 0x430
- shl b32 $r8 6
- iowr I[$r8 + 0x000] $r0
- ret
-
-// wait_done{z,o} - wait on FUC_DONE bit to become clear/set
-//
-// In : $r10 bit to wait on
-//
-define(`wait_done', `
-$1:
- trace_set(T_WAIT);
- mov $r8 0x818
- shl b32 $r8 6
- iowr I[$r8 + 0x000] $r10 // CC_SCRATCH[6] = wait bit
- wait_done_$1:
- mov $r8 0x400
- shl b32 $r8 6
- iord $r8 I[$r8 + 0x000] // DONE
- xbit $r8 $r8 $r10
- bra $2 #wait_done_$1
- trace_clr(T_WAIT)
- ret
-')
-wait_done(wait_donez, ne)
-wait_done(wait_doneo, e)
-
-// mmctx_size - determine size of a mmio list transfer
-//
-// In : $r14 mmio list head
-// $r15 mmio list tail
-// Out: $r15 transfer size (in bytes)
-//
-mmctx_size:
- clear b32 $r9
- nv_mmctx_size_loop:
- ld b32 $r8 D[$r14]
- shr b32 $r8 26
- add b32 $r8 1
- shl b32 $r8 2
- add b32 $r9 $r8
- add b32 $r14 4
- cmpu b32 $r14 $r15
- bra ne #nv_mmctx_size_loop
- mov b32 $r15 $r9
- ret
-
-// mmctx_xfer - execute a list of mmio transfers
-//
-// In : $r10 flags
-// bit 0: direction (0 = save, 1 = load)
-// bit 1: set if first transfer
-// bit 2: set if last transfer
-// $r11 base
-// $r12 mmio list head
-// $r13 mmio list tail
-// $r14 multi_stride
-// $r15 multi_mask
-//
-mmctx_xfer:
- trace_set(T_MMCTX)
- mov $r8 0x710
- shl b32 $r8 6
- clear b32 $r9
- or $r11 $r11
- bra e #mmctx_base_disabled
- iowr I[$r8 + 0x000] $r11 // MMCTX_BASE
- bset $r9 0 // BASE_EN
- mmctx_base_disabled:
- or $r14 $r14
- bra e #mmctx_multi_disabled
- iowr I[$r8 + 0x200] $r14 // MMCTX_MULTI_STRIDE
- iowr I[$r8 + 0x300] $r15 // MMCTX_MULTI_MASK
- bset $r9 1 // MULTI_EN
- mmctx_multi_disabled:
- add b32 $r8 0x100
-
- xbit $r11 $r10 0
- shl b32 $r11 16 // DIR
- bset $r11 12 // QLIMIT = 0x10
- xbit $r14 $r10 1
- shl b32 $r14 17
- or $r11 $r14 // START_TRIGGER
- iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
-
- // loop over the mmio list, and send requests to the hw
- mmctx_exec_loop:
- // wait for space in mmctx queue
- mmctx_wait_free:
- iord $r14 I[$r8 + 0x000] // MMCTX_CTRL
- and $r14 0x1f
- bra e #mmctx_wait_free
-
- // queue up an entry
- ld b32 $r14 D[$r12]
- or $r14 $r9
- iowr I[$r8 + 0x300] $r14
- add b32 $r12 4
- cmpu b32 $r12 $r13
- bra ne #mmctx_exec_loop
-
- xbit $r11 $r10 2
- bra ne #mmctx_stop
- // wait for queue to empty
- mmctx_fini_wait:
- iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
- and $r11 0x1f
- cmpu b32 $r11 0x10
- bra ne #mmctx_fini_wait
- mov $r10 2 // DONE_MMCTX
- call #wait_donez
- bra #mmctx_done
- mmctx_stop:
- xbit $r11 $r10 0
- shl b32 $r11 16 // DIR
- bset $r11 12 // QLIMIT = 0x10
- bset $r11 18 // STOP_TRIGGER
- iowr I[$r8 + 0x000] $r11 // MMCTX_CTRL
- mmctx_stop_wait:
- // wait for STOP_TRIGGER to clear
- iord $r11 I[$r8 + 0x000] // MMCTX_CTRL
- xbit $r11 $r11 18
- bra ne #mmctx_stop_wait
- mmctx_done:
- trace_clr(T_MMCTX)
- ret
-
-// Wait for DONE_STRAND
-//
-strand_wait:
- push $r10
- mov $r10 2
- call #wait_donez
- pop $r10
- ret
-
-// unknown - call before issuing strand commands
-//
-strand_pre:
- mov $r8 0x4afc
- sethi $r8 0x20000
- mov $r9 0xc
- iowr I[$r8] $r9
- call #strand_wait
- ret
-
-// unknown - call after issuing strand commands
-//
-strand_post:
- mov $r8 0x4afc
- sethi $r8 0x20000
- mov $r9 0xd
- iowr I[$r8] $r9
- call #strand_wait
- ret
-
-// Selects strand set?!
-//
-// In: $r14 id
-//
-strand_set:
- mov $r10 0x4ffc
- sethi $r10 0x20000
- sub b32 $r11 $r10 0x500
- mov $r12 0xf
- iowr I[$r10 + 0x000] $r12 // 0x93c = 0xf
- mov $r12 0xb
- iowr I[$r11 + 0x000] $r12 // 0x928 = 0xb
- call #strand_wait
- iowr I[$r10 + 0x000] $r14 // 0x93c = <id>
- mov $r12 0xa
- iowr I[$r11 + 0x000] $r12 // 0x928 = 0xa
- call #strand_wait
- ret
-
-// Initialise strand context data
-//
-// In : $r15 context base
-// Out: $r15 context size (in bytes)
-//
-// Strandset(?) 3 hardcoded currently
-//
-strand_ctx_init:
- trace_set(T_STRINIT)
- call #strand_pre
- mov $r14 3
- call #strand_set
- mov $r10 0x46fc
- sethi $r10 0x20000
- add b32 $r11 $r10 0x400
- iowr I[$r10 + 0x100] $r0 // STRAND_FIRST_GENE = 0
- mov $r12 1
- iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_FIRST_GENE
- call #strand_wait
- sub b32 $r12 $r0 1
- iowr I[$r10 + 0x000] $r12 // STRAND_GENE_CNT = 0xffffffff
- mov $r12 2
- iowr I[$r11 + 0x000] $r12 // STRAND_CMD = LATCH_GENE_CNT
- call #strand_wait
- call #strand_post
-
- // read the size of each strand, poke the context offset of
- // each into STRAND_{SAVE,LOAD}_SWBASE now, no need to worry
- // about it later then.
- mov $r8 0x880
- shl b32 $r8 6
- iord $r9 I[$r8 + 0x000] // STRANDS
- add b32 $r8 0x2200
- shr b32 $r14 $r15 8
- ctx_init_strand_loop:
- iowr I[$r8 + 0x000] $r14 // STRAND_SAVE_SWBASE
- iowr I[$r8 + 0x100] $r14 // STRAND_LOAD_SWBASE
- iord $r10 I[$r8 + 0x200] // STRAND_SIZE
- shr b32 $r10 6
- add b32 $r10 1
- add b32 $r14 $r10
- add b32 $r8 4
- sub b32 $r9 1
- bra ne #ctx_init_strand_loop
-
- shl b32 $r14 8
- sub b32 $r15 $r14 $r15
- trace_clr(T_STRINIT)
- ret
-')
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NVC0_GRAPH_H__
-#define __NVC0_GRAPH_H__
-
-#define GPC_MAX 4
-#define TP_MAX 32
-
-#define ROP_BCAST(r) (0x408800 + (r))
-#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
-#define GPC_BCAST(r) (0x418000 + (r))
-#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
-#define TP_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
-
-struct nvc0_graph_fuc {
- u32 *data;
- u32 size;
-};
-
-struct nvc0_graph_priv {
- struct nouveau_exec_engine base;
-
- struct nvc0_graph_fuc fuc409c;
- struct nvc0_graph_fuc fuc409d;
- struct nvc0_graph_fuc fuc41ac;
- struct nvc0_graph_fuc fuc41ad;
-
- u8 gpc_nr;
- u8 rop_nr;
- u8 tp_nr[GPC_MAX];
- u8 tp_total;
-
- u32 grctx_size;
- u32 *grctx_vals;
- struct nouveau_gpuobj *unk4188b4;
- struct nouveau_gpuobj *unk4188b8;
-
- u8 magic_not_rop_nr;
-};
-
-struct nvc0_graph_chan {
- struct nouveau_gpuobj *grctx;
- struct nouveau_gpuobj *unk408004; /* 0x418810 too */
- struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
- struct nouveau_gpuobj *unk418810; /* 0x419848 too */
- struct nouveau_gpuobj *mmio;
- int mmio_nr;
-};
-
-int nvc0_grctx_generate(struct nouveau_channel *);
-
-/* nvc0_graph.c uses this also to determine supported chipsets */
-static inline u32
-nvc0_graph_class(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- switch (dev_priv->chipset) {
- case 0xc0:
- case 0xc3:
- case 0xc4:
- case 0xce: /* guess, mmio trace shows only 0x9097 state */
- case 0xcf: /* guess, mmio trace shows only 0x9097 state */
- return 0x9097;
- case 0xc1:
- return 0x9197;
- case 0xc8:
- case 0xd9:
- return 0x9297;
- default:
- return 0;
- }
-}
-
-#endif
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nvc0_graph.h"
-
-static void
-nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
-{
- nv_wr32(dev, 0x400204, data);
- nv_wr32(dev, 0x400200, icmd);
- while (nv_rd32(dev, 0x400700) & 2) {}
-}
-
-static void
-nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
-{
- nv_wr32(dev, 0x40448c, data);
- nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
-}
-
-static void
-nvc0_grctx_generate_9097(struct drm_device *dev)
-{
- u32 fermi = nvc0_graph_class(dev);
- u32 mthd;
-
- nv_mthd(dev, 0x9097, 0x0800, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0840, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0880, 0x00000000);
- nv_mthd(dev, 0x9097, 0x08c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0900, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0940, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0980, 0x00000000);
- nv_mthd(dev, 0x9097, 0x09c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0804, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0844, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0884, 0x00000000);
- nv_mthd(dev, 0x9097, 0x08c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0904, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0944, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0984, 0x00000000);
- nv_mthd(dev, 0x9097, 0x09c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0808, 0x00000400);
- nv_mthd(dev, 0x9097, 0x0848, 0x00000400);
- nv_mthd(dev, 0x9097, 0x0888, 0x00000400);
- nv_mthd(dev, 0x9097, 0x08c8, 0x00000400);
- nv_mthd(dev, 0x9097, 0x0908, 0x00000400);
- nv_mthd(dev, 0x9097, 0x0948, 0x00000400);
- nv_mthd(dev, 0x9097, 0x0988, 0x00000400);
- nv_mthd(dev, 0x9097, 0x09c8, 0x00000400);
- nv_mthd(dev, 0x9097, 0x080c, 0x00000300);
- nv_mthd(dev, 0x9097, 0x084c, 0x00000300);
- nv_mthd(dev, 0x9097, 0x088c, 0x00000300);
- nv_mthd(dev, 0x9097, 0x08cc, 0x00000300);
- nv_mthd(dev, 0x9097, 0x090c, 0x00000300);
- nv_mthd(dev, 0x9097, 0x094c, 0x00000300);
- nv_mthd(dev, 0x9097, 0x098c, 0x00000300);
- nv_mthd(dev, 0x9097, 0x09cc, 0x00000300);
- nv_mthd(dev, 0x9097, 0x0810, 0x000000cf);
- nv_mthd(dev, 0x9097, 0x0850, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0890, 0x00000000);
- nv_mthd(dev, 0x9097, 0x08d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0910, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0950, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0990, 0x00000000);
- nv_mthd(dev, 0x9097, 0x09d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0814, 0x00000040);
- nv_mthd(dev, 0x9097, 0x0854, 0x00000040);
- nv_mthd(dev, 0x9097, 0x0894, 0x00000040);
- nv_mthd(dev, 0x9097, 0x08d4, 0x00000040);
- nv_mthd(dev, 0x9097, 0x0914, 0x00000040);
- nv_mthd(dev, 0x9097, 0x0954, 0x00000040);
- nv_mthd(dev, 0x9097, 0x0994, 0x00000040);
- nv_mthd(dev, 0x9097, 0x09d4, 0x00000040);
- nv_mthd(dev, 0x9097, 0x0818, 0x00000001);
- nv_mthd(dev, 0x9097, 0x0858, 0x00000001);
- nv_mthd(dev, 0x9097, 0x0898, 0x00000001);
- nv_mthd(dev, 0x9097, 0x08d8, 0x00000001);
- nv_mthd(dev, 0x9097, 0x0918, 0x00000001);
- nv_mthd(dev, 0x9097, 0x0958, 0x00000001);
- nv_mthd(dev, 0x9097, 0x0998, 0x00000001);
- nv_mthd(dev, 0x9097, 0x09d8, 0x00000001);
- nv_mthd(dev, 0x9097, 0x081c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x085c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x089c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x08dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x091c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x095c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x099c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x09dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0820, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0860, 0x00000000);
- nv_mthd(dev, 0x9097, 0x08a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x08e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0920, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0960, 0x00000000);
- nv_mthd(dev, 0x9097, 0x09a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x09e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2700, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2720, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2740, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2760, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2780, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2704, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2724, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2744, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2764, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2784, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2708, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2728, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2748, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2768, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2788, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x270c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x272c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x274c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x276c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x278c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x27ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2710, 0x00014000);
- nv_mthd(dev, 0x9097, 0x2730, 0x00014000);
- nv_mthd(dev, 0x9097, 0x2750, 0x00014000);
- nv_mthd(dev, 0x9097, 0x2770, 0x00014000);
- nv_mthd(dev, 0x9097, 0x2790, 0x00014000);
- nv_mthd(dev, 0x9097, 0x27b0, 0x00014000);
- nv_mthd(dev, 0x9097, 0x27d0, 0x00014000);
- nv_mthd(dev, 0x9097, 0x27f0, 0x00014000);
- nv_mthd(dev, 0x9097, 0x2714, 0x00000040);
- nv_mthd(dev, 0x9097, 0x2734, 0x00000040);
- nv_mthd(dev, 0x9097, 0x2754, 0x00000040);
- nv_mthd(dev, 0x9097, 0x2774, 0x00000040);
- nv_mthd(dev, 0x9097, 0x2794, 0x00000040);
- nv_mthd(dev, 0x9097, 0x27b4, 0x00000040);
- nv_mthd(dev, 0x9097, 0x27d4, 0x00000040);
- nv_mthd(dev, 0x9097, 0x27f4, 0x00000040);
- nv_mthd(dev, 0x9097, 0x1c00, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c10, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c20, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c30, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c40, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c50, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c60, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c70, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c80, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c90, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ca0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cb0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cc0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cd0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ce0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cf0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c04, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c14, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c24, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c34, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c44, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c54, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c64, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c74, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c84, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c94, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ca4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cb4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cc4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cd4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ce4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cf4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c08, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c18, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c28, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c38, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c48, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c58, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c68, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c78, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c88, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c98, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ca8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cb8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cc8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cd8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ce8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cf8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c0c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c1c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c2c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c3c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c4c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c5c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c6c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c7c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c8c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1c9c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cbc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ccc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cdc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1cfc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d00, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d10, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d20, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d30, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d40, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d50, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d60, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d70, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d80, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d90, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1da0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1db0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dc0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dd0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1de0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1df0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d04, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d14, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d24, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d34, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d44, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d54, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d64, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d74, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d84, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d94, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1da4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1db4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dc4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dd4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1de4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1df4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d08, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d18, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d28, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d38, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d48, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d58, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d68, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d78, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d88, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d98, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1da8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1db8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dc8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dd8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1de8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1df8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d0c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d1c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d2c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d3c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d4c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d5c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d6c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d7c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d8c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1d9c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dbc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dcc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ddc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1dfc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f00, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f08, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f10, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f18, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f20, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f28, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f30, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f38, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f40, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f48, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f50, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f58, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f60, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f68, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f70, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f78, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f04, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f0c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f14, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f1c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f24, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f2c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f34, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f3c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f44, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f4c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f54, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f5c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f64, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f6c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f74, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f7c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f80, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f88, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f90, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f98, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fa0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fa8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fb0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fb8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fc0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fc8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fd0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fd8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fe0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fe8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ff0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ff8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f84, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f8c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f94, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1f9c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fa4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fb4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fbc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fc4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fcc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fd4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fdc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fe4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1fec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ff4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1ffc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2200, 0x00000022);
- nv_mthd(dev, 0x9097, 0x2210, 0x00000022);
- nv_mthd(dev, 0x9097, 0x2220, 0x00000022);
- nv_mthd(dev, 0x9097, 0x2230, 0x00000022);
- nv_mthd(dev, 0x9097, 0x2240, 0x00000022);
- nv_mthd(dev, 0x9097, 0x2000, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2040, 0x00000011);
- nv_mthd(dev, 0x9097, 0x2080, 0x00000020);
- nv_mthd(dev, 0x9097, 0x20c0, 0x00000030);
- nv_mthd(dev, 0x9097, 0x2100, 0x00000040);
- nv_mthd(dev, 0x9097, 0x2140, 0x00000051);
- nv_mthd(dev, 0x9097, 0x200c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x204c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x208c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x20cc, 0x00000001);
- nv_mthd(dev, 0x9097, 0x210c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x214c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x2010, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2050, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2090, 0x00000001);
- nv_mthd(dev, 0x9097, 0x20d0, 0x00000002);
- nv_mthd(dev, 0x9097, 0x2110, 0x00000003);
- nv_mthd(dev, 0x9097, 0x2150, 0x00000004);
- nv_mthd(dev, 0x9097, 0x0380, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0384, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0388, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x038c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x03ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0700, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0710, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0720, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0730, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0704, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0714, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0724, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0734, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0708, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0718, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0728, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0738, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2800, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2804, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2808, 0x00000000);
- nv_mthd(dev, 0x9097, 0x280c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2810, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2814, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2818, 0x00000000);
- nv_mthd(dev, 0x9097, 0x281c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2820, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2824, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2828, 0x00000000);
- nv_mthd(dev, 0x9097, 0x282c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2830, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2834, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2838, 0x00000000);
- nv_mthd(dev, 0x9097, 0x283c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2840, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2844, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2848, 0x00000000);
- nv_mthd(dev, 0x9097, 0x284c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2850, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2854, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2858, 0x00000000);
- nv_mthd(dev, 0x9097, 0x285c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2860, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2864, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2868, 0x00000000);
- nv_mthd(dev, 0x9097, 0x286c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2870, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2874, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2878, 0x00000000);
- nv_mthd(dev, 0x9097, 0x287c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2880, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2884, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2888, 0x00000000);
- nv_mthd(dev, 0x9097, 0x288c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2890, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2894, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2898, 0x00000000);
- nv_mthd(dev, 0x9097, 0x289c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28b0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28b4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28b8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28d4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28d8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28f0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28f4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28f8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x28fc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2900, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2904, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2908, 0x00000000);
- nv_mthd(dev, 0x9097, 0x290c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2910, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2914, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2918, 0x00000000);
- nv_mthd(dev, 0x9097, 0x291c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2920, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2924, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2928, 0x00000000);
- nv_mthd(dev, 0x9097, 0x292c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2930, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2934, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2938, 0x00000000);
- nv_mthd(dev, 0x9097, 0x293c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2940, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2944, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2948, 0x00000000);
- nv_mthd(dev, 0x9097, 0x294c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2950, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2954, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2958, 0x00000000);
- nv_mthd(dev, 0x9097, 0x295c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2960, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2964, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2968, 0x00000000);
- nv_mthd(dev, 0x9097, 0x296c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2970, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2974, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2978, 0x00000000);
- nv_mthd(dev, 0x9097, 0x297c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2980, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2984, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2988, 0x00000000);
- nv_mthd(dev, 0x9097, 0x298c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2990, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2994, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2998, 0x00000000);
- nv_mthd(dev, 0x9097, 0x299c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29b0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29b4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29b8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29d4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29d8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29f0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29f4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29f8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x29fc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a00, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a20, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a40, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a60, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a80, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0aa0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ac0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ae0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b00, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b20, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b40, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b60, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b80, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ba0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bc0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0be0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a04, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a24, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a44, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a64, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a84, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0aa4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ac4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ae4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b04, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b24, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b44, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b64, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b84, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ba4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bc4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0be4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a08, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a28, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a48, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a68, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a88, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0aa8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ac8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ae8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b08, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b28, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b48, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b68, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b88, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ba8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bc8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0be8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a0c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a2c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a4c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a6c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a8c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0aac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0acc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0aec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b0c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b2c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b4c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b6c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b8c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bcc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a10, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a30, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a50, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a70, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a90, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ab0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ad0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0af0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b10, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b30, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b50, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b70, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b90, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bb0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bd0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bf0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a14, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a34, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a54, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a74, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0a94, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ab4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ad4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0af4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b14, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b34, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b54, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b74, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0b94, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bb4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bd4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0bf4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c00, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c10, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c20, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c30, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c40, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c50, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c60, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c70, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c80, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c90, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ca0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cb0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cc0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cd0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ce0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cf0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c04, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c14, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c24, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c34, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c44, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c54, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c64, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c74, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c84, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c94, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ca4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cb4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cc4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cd4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ce4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cf4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c08, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c18, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c28, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c38, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c48, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c58, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c68, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c78, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c88, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c98, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ca8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cb8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cc8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cd8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ce8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0cf8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0c0c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0c1c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0c2c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0c3c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0c4c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0c5c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0c6c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0c7c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0c8c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0c9c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0cac, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0cbc, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0ccc, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0cdc, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0cec, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0cfc, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0d00, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d08, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d10, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d18, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d20, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d28, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d30, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d38, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d04, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d0c, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d14, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d1c, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d24, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d2c, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d34, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d3c, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e00, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e10, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e20, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e30, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e40, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e50, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e60, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e70, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e80, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e90, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ea0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0eb0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ec0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ed0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ee0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ef0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0e04, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e14, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e24, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e34, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e44, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e54, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e64, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e74, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e84, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e94, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ea4, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0eb4, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ec4, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ed4, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ee4, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ef4, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e08, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e18, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e28, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e38, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e48, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e58, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e68, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e78, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e88, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0e98, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ea8, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0eb8, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ec8, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ed8, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ee8, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0ef8, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d40, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d48, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d50, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d58, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d44, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d4c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d54, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d5c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1e00, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e20, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e40, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e60, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e80, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ea0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ec0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ee0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e04, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e24, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e44, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e64, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e84, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ea4, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ec4, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ee4, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e08, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e28, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e48, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e68, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e88, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1ea8, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1ec8, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1ee8, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e0c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e2c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e4c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e6c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e8c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1eac, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ecc, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1eec, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e10, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e30, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e50, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e70, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e90, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1eb0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ed0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ef0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e14, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e34, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e54, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e74, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e94, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1eb4, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1ed4, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1ef4, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1e18, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e38, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e58, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e78, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1e98, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1eb8, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ed8, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1ef8, 0x00000001);
- if (fermi == 0x9097) {
- for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
- nv_mthd(dev, 0x9097, mthd, 0x00000000);
- }
- nv_mthd(dev, 0x9097, 0x030c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1944, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1514, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d68, 0x0000ffff);
- nv_mthd(dev, 0x9097, 0x121c, 0x0fac6881);
- nv_mthd(dev, 0x9097, 0x0fac, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1538, 0x00000001);
- nv_mthd(dev, 0x9097, 0x0fe0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0fe4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0fe8, 0x00000014);
- nv_mthd(dev, 0x9097, 0x0fec, 0x00000040);
- nv_mthd(dev, 0x9097, 0x0ff0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x179c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1228, 0x00000400);
- nv_mthd(dev, 0x9097, 0x122c, 0x00000300);
- nv_mthd(dev, 0x9097, 0x1230, 0x00010001);
- nv_mthd(dev, 0x9097, 0x07f8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x15b4, 0x00000001);
- nv_mthd(dev, 0x9097, 0x15cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1534, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0fb0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x15d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x153c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x16b4, 0x00000003);
- nv_mthd(dev, 0x9097, 0x0fbc, 0x0000ffff);
- nv_mthd(dev, 0x9097, 0x0fc0, 0x0000ffff);
- nv_mthd(dev, 0x9097, 0x0fc4, 0x0000ffff);
- nv_mthd(dev, 0x9097, 0x0fc8, 0x0000ffff);
- nv_mthd(dev, 0x9097, 0x0df8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0dfc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1948, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1970, 0x00000001);
- nv_mthd(dev, 0x9097, 0x161c, 0x000009f0);
- nv_mthd(dev, 0x9097, 0x0dcc, 0x00000010);
- nv_mthd(dev, 0x9097, 0x163c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x15e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1160, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1164, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1168, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x116c, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1170, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1174, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1178, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x117c, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1180, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1184, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1188, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x118c, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1190, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1194, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1198, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x119c, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11a0, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11a4, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11a8, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11ac, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11b0, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11b4, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11b8, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11bc, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11c0, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11c4, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11c8, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11cc, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11d0, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11d4, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11d8, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x11dc, 0x25e00040);
- nv_mthd(dev, 0x9097, 0x1880, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1884, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1888, 0x00000000);
- nv_mthd(dev, 0x9097, 0x188c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1890, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1894, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1898, 0x00000000);
- nv_mthd(dev, 0x9097, 0x189c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18b0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18b4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18b8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18d0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18d4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18d8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18e0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18f0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18f4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18f8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x18fc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0f84, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0f88, 0x00000000);
- nv_mthd(dev, 0x9097, 0x17c8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x17cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x17d0, 0x000000ff);
- nv_mthd(dev, 0x9097, 0x17d4, 0xffffffff);
- nv_mthd(dev, 0x9097, 0x17d8, 0x00000002);
- nv_mthd(dev, 0x9097, 0x17dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x15f4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x15f8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1434, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1438, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d74, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0dec, 0x00000001);
- nv_mthd(dev, 0x9097, 0x13a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1318, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1644, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0748, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0de8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1648, 0x00000000);
- nv_mthd(dev, 0x9097, 0x12a4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1120, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1124, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1128, 0x00000000);
- nv_mthd(dev, 0x9097, 0x112c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1118, 0x00000000);
- nv_mthd(dev, 0x9097, 0x164c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1658, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1910, 0x00000290);
- nv_mthd(dev, 0x9097, 0x1518, 0x00000000);
- nv_mthd(dev, 0x9097, 0x165c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1520, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1604, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1570, 0x00000000);
- nv_mthd(dev, 0x9097, 0x13b0, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x13b4, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x020c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1670, 0x30201000);
- nv_mthd(dev, 0x9097, 0x1674, 0x70605040);
- nv_mthd(dev, 0x9097, 0x1678, 0xb8a89888);
- nv_mthd(dev, 0x9097, 0x167c, 0xf8e8d8c8);
- nv_mthd(dev, 0x9097, 0x166c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1680, 0x00ffff00);
- nv_mthd(dev, 0x9097, 0x12d0, 0x00000003);
- nv_mthd(dev, 0x9097, 0x12d4, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1684, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1688, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0dac, 0x00001b02);
- nv_mthd(dev, 0x9097, 0x0db0, 0x00001b02);
- nv_mthd(dev, 0x9097, 0x0db4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x168c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x15bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x156c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x187c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1110, 0x00000001);
- nv_mthd(dev, 0x9097, 0x0dc0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0dc4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0dc8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1234, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1690, 0x00000000);
- nv_mthd(dev, 0x9097, 0x12ac, 0x00000001);
- nv_mthd(dev, 0x9097, 0x02c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0790, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0794, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0798, 0x00000000);
- nv_mthd(dev, 0x9097, 0x079c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x07a0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x077c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1000, 0x00000010);
- nv_mthd(dev, 0x9097, 0x10fc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1290, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0218, 0x00000010);
- nv_mthd(dev, 0x9097, 0x12d8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x12dc, 0x00000010);
- nv_mthd(dev, 0x9097, 0x0d94, 0x00000001);
- nv_mthd(dev, 0x9097, 0x155c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1560, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1564, 0x00001fff);
- nv_mthd(dev, 0x9097, 0x1574, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1578, 0x00000000);
- nv_mthd(dev, 0x9097, 0x157c, 0x003fffff);
- nv_mthd(dev, 0x9097, 0x1354, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1664, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1610, 0x00000012);
- nv_mthd(dev, 0x9097, 0x1608, 0x00000000);
- nv_mthd(dev, 0x9097, 0x160c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x162c, 0x00000003);
- nv_mthd(dev, 0x9097, 0x0210, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0320, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0324, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0328, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x032c, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0330, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0334, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0338, 0x3f800000);
- nv_mthd(dev, 0x9097, 0x0750, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0760, 0x39291909);
- nv_mthd(dev, 0x9097, 0x0764, 0x79695949);
- nv_mthd(dev, 0x9097, 0x0768, 0xb9a99989);
- nv_mthd(dev, 0x9097, 0x076c, 0xf9e9d9c9);
- nv_mthd(dev, 0x9097, 0x0770, 0x30201000);
- nv_mthd(dev, 0x9097, 0x0774, 0x70605040);
- nv_mthd(dev, 0x9097, 0x0778, 0x00009080);
- nv_mthd(dev, 0x9097, 0x0780, 0x39291909);
- nv_mthd(dev, 0x9097, 0x0784, 0x79695949);
- nv_mthd(dev, 0x9097, 0x0788, 0xb9a99989);
- nv_mthd(dev, 0x9097, 0x078c, 0xf9e9d9c9);
- nv_mthd(dev, 0x9097, 0x07d0, 0x30201000);
- nv_mthd(dev, 0x9097, 0x07d4, 0x70605040);
- nv_mthd(dev, 0x9097, 0x07d8, 0x00009080);
- nv_mthd(dev, 0x9097, 0x037c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x0740, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0744, 0x00000000);
- nv_mthd(dev, 0x9097, 0x2600, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1918, 0x00000000);
- nv_mthd(dev, 0x9097, 0x191c, 0x00000900);
- nv_mthd(dev, 0x9097, 0x1920, 0x00000405);
- nv_mthd(dev, 0x9097, 0x1308, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1924, 0x00000000);
- nv_mthd(dev, 0x9097, 0x13ac, 0x00000000);
- nv_mthd(dev, 0x9097, 0x192c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x193c, 0x00002c1c);
- nv_mthd(dev, 0x9097, 0x0d7c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0f8c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x02c0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1510, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1940, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ff4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0ff8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x194c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1950, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1968, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1590, 0x0000003f);
- nv_mthd(dev, 0x9097, 0x07e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x07ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x07f0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x07f4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x196c, 0x00000011);
- nv_mthd(dev, 0x9097, 0x197c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0fcc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0fd0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x02d8, 0x00000040);
- nv_mthd(dev, 0x9097, 0x1980, 0x00000080);
- nv_mthd(dev, 0x9097, 0x1504, 0x00000080);
- nv_mthd(dev, 0x9097, 0x1984, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0300, 0x00000001);
- nv_mthd(dev, 0x9097, 0x13a8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x12ec, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1310, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1314, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1380, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1384, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1388, 0x00000001);
- nv_mthd(dev, 0x9097, 0x138c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1390, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1394, 0x00000000);
- nv_mthd(dev, 0x9097, 0x139c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1398, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1594, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1598, 0x00000001);
- nv_mthd(dev, 0x9097, 0x159c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x15a0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x15a4, 0x00000001);
- nv_mthd(dev, 0x9097, 0x0f54, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0f58, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0f5c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x19bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0f9c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0fa0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x12cc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x12e8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x130c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1360, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1364, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1368, 0x00000000);
- nv_mthd(dev, 0x9097, 0x136c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1370, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1374, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1378, 0x00000000);
- nv_mthd(dev, 0x9097, 0x137c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x133c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1340, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1344, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1348, 0x00000001);
- nv_mthd(dev, 0x9097, 0x134c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1350, 0x00000002);
- nv_mthd(dev, 0x9097, 0x1358, 0x00000001);
- nv_mthd(dev, 0x9097, 0x12e4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x131c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1320, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1324, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1328, 0x00000000);
- nv_mthd(dev, 0x9097, 0x19c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1140, 0x00000000);
- nv_mthd(dev, 0x9097, 0x19c4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x19c8, 0x00001500);
- nv_mthd(dev, 0x9097, 0x135c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0f90, 0x00000000);
- nv_mthd(dev, 0x9097, 0x19e0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x19e4, 0x00000001);
- nv_mthd(dev, 0x9097, 0x19e8, 0x00000001);
- nv_mthd(dev, 0x9097, 0x19ec, 0x00000001);
- nv_mthd(dev, 0x9097, 0x19f0, 0x00000001);
- nv_mthd(dev, 0x9097, 0x19f4, 0x00000001);
- nv_mthd(dev, 0x9097, 0x19f8, 0x00000001);
- nv_mthd(dev, 0x9097, 0x19fc, 0x00000001);
- nv_mthd(dev, 0x9097, 0x19cc, 0x00000001);
- nv_mthd(dev, 0x9097, 0x15b8, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1a00, 0x00001111);
- nv_mthd(dev, 0x9097, 0x1a04, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1a08, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1a0c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1a10, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1a14, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1a18, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1a1c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d6c, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x0d70, 0xffff0000);
- nv_mthd(dev, 0x9097, 0x10f8, 0x00001010);
- nv_mthd(dev, 0x9097, 0x0d80, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d84, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d88, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d8c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0d90, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0da0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1508, 0x80000000);
- nv_mthd(dev, 0x9097, 0x150c, 0x40000000);
- nv_mthd(dev, 0x9097, 0x1668, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0318, 0x00000008);
- nv_mthd(dev, 0x9097, 0x031c, 0x00000008);
- nv_mthd(dev, 0x9097, 0x0d9c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x07dc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x074c, 0x00000055);
- nv_mthd(dev, 0x9097, 0x1420, 0x00000003);
- nv_mthd(dev, 0x9097, 0x17bc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x17c0, 0x00000000);
- nv_mthd(dev, 0x9097, 0x17c4, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1008, 0x00000008);
- nv_mthd(dev, 0x9097, 0x100c, 0x00000040);
- nv_mthd(dev, 0x9097, 0x1010, 0x0000012c);
- nv_mthd(dev, 0x9097, 0x0d60, 0x00000040);
- nv_mthd(dev, 0x9097, 0x075c, 0x00000003);
- nv_mthd(dev, 0x9097, 0x1018, 0x00000020);
- nv_mthd(dev, 0x9097, 0x101c, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1020, 0x00000020);
- nv_mthd(dev, 0x9097, 0x1024, 0x00000001);
- nv_mthd(dev, 0x9097, 0x1444, 0x00000000);
- nv_mthd(dev, 0x9097, 0x1448, 0x00000000);
- nv_mthd(dev, 0x9097, 0x144c, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0360, 0x20164010);
- nv_mthd(dev, 0x9097, 0x0364, 0x00000020);
- nv_mthd(dev, 0x9097, 0x0368, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0de4, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0204, 0x00000006);
- nv_mthd(dev, 0x9097, 0x0208, 0x00000000);
- nv_mthd(dev, 0x9097, 0x02cc, 0x003fffff);
- nv_mthd(dev, 0x9097, 0x02d0, 0x00000c48);
- nv_mthd(dev, 0x9097, 0x1220, 0x00000005);
- nv_mthd(dev, 0x9097, 0x0fdc, 0x00000000);
- nv_mthd(dev, 0x9097, 0x0f98, 0x00300008);
- nv_mthd(dev, 0x9097, 0x1284, 0x04000080);
- nv_mthd(dev, 0x9097, 0x1450, 0x00300008);
- nv_mthd(dev, 0x9097, 0x1454, 0x04000080);
- nv_mthd(dev, 0x9097, 0x0214, 0x00000000);
- /* in trace, right after 0x90c0, not here */
- nv_mthd(dev, 0x9097, 0x3410, 0x80002006);
-}
-
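(Aside, not part of the patch: the removed nvc0_grctx_generate_9097() above is almost entirely long runs of nv_mthd() writes with a regular stride, a fixed count, and a constant value per run. A minimal sketch of how such runs could be driven from a small table is shown below; the struct and helper names here are hypothetical illustrations, not the driver's actual API, and only nv_mthd(), u32, and struct drm_device are taken from the surrounding code.)

struct grctx_mthd_run {
	u32 mthd;	/* first method offset in the run */
	u32 stride;	/* distance between consecutive methods */
	u32 count;	/* number of methods in the run */
	u32 data;	/* value written to every method in the run */
};

static void
grctx_emit_runs(struct drm_device *dev, u32 grclass,
		const struct grctx_mthd_run *run, int nr)
{
	int i, j;

	/* replay each run as the equivalent sequence of nv_mthd() writes */
	for (i = 0; i < nr; i++, run++)
		for (j = 0; j < run->count; j++)
			nv_mthd(dev, grclass, run->mthd + j * run->stride,
				run->data);
}

(For example, the sixteen 0x0c0c..0x0cfc writes of 0x3f800000 above would collapse to a single { 0x0c0c, 0x10, 16, 0x3f800000 } entry.)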
-static void
-nvc0_grctx_generate_9197(struct drm_device *dev)
-{
- u32 fermi = nvc0_graph_class(dev);
- u32 mthd;
-
- if (fermi == 0x9197) {
- for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
- nv_mthd(dev, 0x9197, mthd, 0x00000000);
- }
- nv_mthd(dev, 0x9197, 0x02e4, 0x0000b001);
-}
-
-static void
-nvc0_grctx_generate_9297(struct drm_device *dev)
-{
- u32 fermi = nvc0_graph_class(dev);
- u32 mthd;
-
- if (fermi == 0x9297) {
- for (mthd = 0x3400; mthd <= 0x35fc; mthd += 4)
- nv_mthd(dev, 0x9297, mthd, 0x00000000);
- }
- nv_mthd(dev, 0x9297, 0x036c, 0x00000000);
- nv_mthd(dev, 0x9297, 0x0370, 0x00000000);
- nv_mthd(dev, 0x9297, 0x07a4, 0x00000000);
- nv_mthd(dev, 0x9297, 0x07a8, 0x00000000);
- nv_mthd(dev, 0x9297, 0x0374, 0x00000000);
- nv_mthd(dev, 0x9297, 0x0378, 0x00000020);
-}
-
-static void
-nvc0_grctx_generate_902d(struct drm_device *dev)
-{
- nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
- nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
- nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
- nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
- nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
- nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
- nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
- nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
- nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
- nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
- nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
- nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
- nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
- nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
- nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
- nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
- nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
-}
-
-static void
-nvc0_grctx_generate_9039(struct drm_device *dev)
-{
- nv_mthd(dev, 0x9039, 0x030c, 0x00000000);
- nv_mthd(dev, 0x9039, 0x0310, 0x00000000);
- nv_mthd(dev, 0x9039, 0x0314, 0x00000000);
- nv_mthd(dev, 0x9039, 0x0320, 0x00000000);
- nv_mthd(dev, 0x9039, 0x0238, 0x00000000);
- nv_mthd(dev, 0x9039, 0x023c, 0x00000000);
- nv_mthd(dev, 0x9039, 0x0318, 0x00000000);
- nv_mthd(dev, 0x9039, 0x031c, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_90c0(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int i;
-
- for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
- nv_mthd(dev, 0x90c0, 0x2700 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2720 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2704 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2724 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2708 + (i * 0x40), 0x00000000);
- nv_mthd(dev, 0x90c0, 0x2728 + (i * 0x40), 0x00000000);
- }
- nv_mthd(dev, 0x90c0, 0x270c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x272c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x274c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x276c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x278c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x27ac, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x27cc, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x27ec, 0x00000000);
- for (i = 0; dev_priv->chipset == 0xd9 && i < 4; i++) {
- nv_mthd(dev, 0x90c0, 0x2710 + (i * 0x40), 0x00014000);
- nv_mthd(dev, 0x90c0, 0x2730 + (i * 0x40), 0x00014000);
- nv_mthd(dev, 0x90c0, 0x2714 + (i * 0x40), 0x00000040);
- nv_mthd(dev, 0x90c0, 0x2734 + (i * 0x40), 0x00000040);
- }
- nv_mthd(dev, 0x90c0, 0x030c, 0x00000001);
- nv_mthd(dev, 0x90c0, 0x1944, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x0758, 0x00000100);
- nv_mthd(dev, 0x90c0, 0x02c4, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x0790, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x0794, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x0798, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x079c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x07a0, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x077c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x0204, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x0208, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x020c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x0214, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x024c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x0d94, 0x00000001);
- nv_mthd(dev, 0x90c0, 0x1608, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x160c, 0x00000000);
- nv_mthd(dev, 0x90c0, 0x1664, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_dispatch(struct drm_device *dev)
-{
- int i;
-
- nv_wr32(dev, 0x404004, 0x00000000);
- nv_wr32(dev, 0x404008, 0x00000000);
- nv_wr32(dev, 0x40400c, 0x00000000);
- nv_wr32(dev, 0x404010, 0x00000000);
- nv_wr32(dev, 0x404014, 0x00000000);
- nv_wr32(dev, 0x404018, 0x00000000);
- nv_wr32(dev, 0x40401c, 0x00000000);
- nv_wr32(dev, 0x404020, 0x00000000);
- nv_wr32(dev, 0x404024, 0x00000000);
- nv_wr32(dev, 0x404028, 0x00000000);
- nv_wr32(dev, 0x40402c, 0x00000000);
- nv_wr32(dev, 0x404044, 0x00000000);
- nv_wr32(dev, 0x404094, 0x00000000);
- nv_wr32(dev, 0x404098, 0x00000000);
- nv_wr32(dev, 0x40409c, 0x00000000);
- nv_wr32(dev, 0x4040a0, 0x00000000);
- nv_wr32(dev, 0x4040a4, 0x00000000);
- nv_wr32(dev, 0x4040a8, 0x00000000);
- nv_wr32(dev, 0x4040ac, 0x00000000);
- nv_wr32(dev, 0x4040b0, 0x00000000);
- nv_wr32(dev, 0x4040b4, 0x00000000);
- nv_wr32(dev, 0x4040b8, 0x00000000);
- nv_wr32(dev, 0x4040bc, 0x00000000);
- nv_wr32(dev, 0x4040c0, 0x00000000);
- nv_wr32(dev, 0x4040c4, 0x00000000);
- nv_wr32(dev, 0x4040c8, 0xf0000087);
- nv_wr32(dev, 0x4040d4, 0x00000000);
- nv_wr32(dev, 0x4040d8, 0x00000000);
- nv_wr32(dev, 0x4040dc, 0x00000000);
- nv_wr32(dev, 0x4040e0, 0x00000000);
- nv_wr32(dev, 0x4040e4, 0x00000000);
- nv_wr32(dev, 0x4040e8, 0x00001000);
- nv_wr32(dev, 0x4040f8, 0x00000000);
- nv_wr32(dev, 0x404130, 0x00000000);
- nv_wr32(dev, 0x404134, 0x00000000);
- nv_wr32(dev, 0x404138, 0x20000040);
- nv_wr32(dev, 0x404150, 0x0000002e);
- nv_wr32(dev, 0x404154, 0x00000400);
- nv_wr32(dev, 0x404158, 0x00000200);
- nv_wr32(dev, 0x404164, 0x00000055);
- nv_wr32(dev, 0x404168, 0x00000000);
- nv_wr32(dev, 0x404174, 0x00000000);
- nv_wr32(dev, 0x404178, 0x00000000);
- nv_wr32(dev, 0x40417c, 0x00000000);
- for (i = 0; i < 8; i++)
- nv_wr32(dev, 0x404200 + (i * 4), 0x00000000); /* subc */
-}
-
-static void
-nvc0_grctx_generate_macro(struct drm_device *dev)
-{
- nv_wr32(dev, 0x404404, 0x00000000);
- nv_wr32(dev, 0x404408, 0x00000000);
- nv_wr32(dev, 0x40440c, 0x00000000);
- nv_wr32(dev, 0x404410, 0x00000000);
- nv_wr32(dev, 0x404414, 0x00000000);
- nv_wr32(dev, 0x404418, 0x00000000);
- nv_wr32(dev, 0x40441c, 0x00000000);
- nv_wr32(dev, 0x404420, 0x00000000);
- nv_wr32(dev, 0x404424, 0x00000000);
- nv_wr32(dev, 0x404428, 0x00000000);
- nv_wr32(dev, 0x40442c, 0x00000000);
- nv_wr32(dev, 0x404430, 0x00000000);
- nv_wr32(dev, 0x404434, 0x00000000);
- nv_wr32(dev, 0x404438, 0x00000000);
- nv_wr32(dev, 0x404460, 0x00000000);
- nv_wr32(dev, 0x404464, 0x00000000);
- nv_wr32(dev, 0x404468, 0x00ffffff);
- nv_wr32(dev, 0x40446c, 0x00000000);
- nv_wr32(dev, 0x404480, 0x00000001);
- nv_wr32(dev, 0x404498, 0x00000001);
-}
-
-static void
-nvc0_grctx_generate_m2mf(struct drm_device *dev)
-{
- nv_wr32(dev, 0x404604, 0x00000015);
- nv_wr32(dev, 0x404608, 0x00000000);
- nv_wr32(dev, 0x40460c, 0x00002e00);
- nv_wr32(dev, 0x404610, 0x00000100);
- nv_wr32(dev, 0x404618, 0x00000000);
- nv_wr32(dev, 0x40461c, 0x00000000);
- nv_wr32(dev, 0x404620, 0x00000000);
- nv_wr32(dev, 0x404624, 0x00000000);
- nv_wr32(dev, 0x404628, 0x00000000);
- nv_wr32(dev, 0x40462c, 0x00000000);
- nv_wr32(dev, 0x404630, 0x00000000);
- nv_wr32(dev, 0x404634, 0x00000000);
- nv_wr32(dev, 0x404638, 0x00000004);
- nv_wr32(dev, 0x40463c, 0x00000000);
- nv_wr32(dev, 0x404640, 0x00000000);
- nv_wr32(dev, 0x404644, 0x00000000);
- nv_wr32(dev, 0x404648, 0x00000000);
- nv_wr32(dev, 0x40464c, 0x00000000);
- nv_wr32(dev, 0x404650, 0x00000000);
- nv_wr32(dev, 0x404654, 0x00000000);
- nv_wr32(dev, 0x404658, 0x00000000);
- nv_wr32(dev, 0x40465c, 0x007f0100);
- nv_wr32(dev, 0x404660, 0x00000000);
- nv_wr32(dev, 0x404664, 0x00000000);
- nv_wr32(dev, 0x404668, 0x00000000);
- nv_wr32(dev, 0x40466c, 0x00000000);
- nv_wr32(dev, 0x404670, 0x00000000);
- nv_wr32(dev, 0x404674, 0x00000000);
- nv_wr32(dev, 0x404678, 0x00000000);
- nv_wr32(dev, 0x40467c, 0x00000002);
- nv_wr32(dev, 0x404680, 0x00000000);
- nv_wr32(dev, 0x404684, 0x00000000);
- nv_wr32(dev, 0x404688, 0x00000000);
- nv_wr32(dev, 0x40468c, 0x00000000);
- nv_wr32(dev, 0x404690, 0x00000000);
- nv_wr32(dev, 0x404694, 0x00000000);
- nv_wr32(dev, 0x404698, 0x00000000);
- nv_wr32(dev, 0x40469c, 0x00000000);
- nv_wr32(dev, 0x4046a0, 0x007f0080);
- nv_wr32(dev, 0x4046a4, 0x00000000);
- nv_wr32(dev, 0x4046a8, 0x00000000);
- nv_wr32(dev, 0x4046ac, 0x00000000);
- nv_wr32(dev, 0x4046b0, 0x00000000);
- nv_wr32(dev, 0x4046b4, 0x00000000);
- nv_wr32(dev, 0x4046b8, 0x00000000);
- nv_wr32(dev, 0x4046bc, 0x00000000);
- nv_wr32(dev, 0x4046c0, 0x00000000);
- nv_wr32(dev, 0x4046c4, 0x00000000);
- nv_wr32(dev, 0x4046c8, 0x00000000);
- nv_wr32(dev, 0x4046cc, 0x00000000);
- nv_wr32(dev, 0x4046d0, 0x00000000);
- nv_wr32(dev, 0x4046d4, 0x00000000);
- nv_wr32(dev, 0x4046d8, 0x00000000);
- nv_wr32(dev, 0x4046dc, 0x00000000);
- nv_wr32(dev, 0x4046e0, 0x00000000);
- nv_wr32(dev, 0x4046e4, 0x00000000);
- nv_wr32(dev, 0x4046e8, 0x00000000);
- nv_wr32(dev, 0x4046f0, 0x00000000);
- nv_wr32(dev, 0x4046f4, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_unk47xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x404700, 0x00000000);
- nv_wr32(dev, 0x404704, 0x00000000);
- nv_wr32(dev, 0x404708, 0x00000000);
- nv_wr32(dev, 0x40470c, 0x00000000);
- nv_wr32(dev, 0x404710, 0x00000000);
- nv_wr32(dev, 0x404714, 0x00000000);
- nv_wr32(dev, 0x404718, 0x00000000);
- nv_wr32(dev, 0x40471c, 0x00000000);
- nv_wr32(dev, 0x404720, 0x00000000);
- nv_wr32(dev, 0x404724, 0x00000000);
- nv_wr32(dev, 0x404728, 0x00000000);
- nv_wr32(dev, 0x40472c, 0x00000000);
- nv_wr32(dev, 0x404730, 0x00000000);
- nv_wr32(dev, 0x404734, 0x00000100);
- nv_wr32(dev, 0x404738, 0x00000000);
- nv_wr32(dev, 0x40473c, 0x00000000);
- nv_wr32(dev, 0x404740, 0x00000000);
- nv_wr32(dev, 0x404744, 0x00000000);
- nv_wr32(dev, 0x404748, 0x00000000);
- nv_wr32(dev, 0x40474c, 0x00000000);
- nv_wr32(dev, 0x404750, 0x00000000);
- nv_wr32(dev, 0x404754, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_shaders(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- if (dev_priv->chipset == 0xd9) {
- nv_wr32(dev, 0x405800, 0x0f8000bf);
- nv_wr32(dev, 0x405830, 0x02180218);
- nv_wr32(dev, 0x405834, 0x08000000);
- } else
- if (dev_priv->chipset == 0xc1) {
- nv_wr32(dev, 0x405800, 0x0f8000bf);
- nv_wr32(dev, 0x405830, 0x02180218);
- nv_wr32(dev, 0x405834, 0x00000000);
- } else {
- nv_wr32(dev, 0x405800, 0x078000bf);
- nv_wr32(dev, 0x405830, 0x02180000);
- nv_wr32(dev, 0x405834, 0x00000000);
- }
- nv_wr32(dev, 0x405838, 0x00000000);
- nv_wr32(dev, 0x405854, 0x00000000);
- nv_wr32(dev, 0x405870, 0x00000001);
- nv_wr32(dev, 0x405874, 0x00000001);
- nv_wr32(dev, 0x405878, 0x00000001);
- nv_wr32(dev, 0x40587c, 0x00000001);
- nv_wr32(dev, 0x405a00, 0x00000000);
- nv_wr32(dev, 0x405a04, 0x00000000);
- nv_wr32(dev, 0x405a18, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_unk60xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x406020, 0x000103c1);
- nv_wr32(dev, 0x406028, 0x00000001);
- nv_wr32(dev, 0x40602c, 0x00000001);
- nv_wr32(dev, 0x406030, 0x00000001);
- nv_wr32(dev, 0x406034, 0x00000001);
-}
-
-static void
-nvc0_grctx_generate_unk64xx(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- nv_wr32(dev, 0x4064a8, 0x00000000);
- nv_wr32(dev, 0x4064ac, 0x00003fff);
- nv_wr32(dev, 0x4064b4, 0x00000000);
- nv_wr32(dev, 0x4064b8, 0x00000000);
- if (dev_priv->chipset == 0xd9)
- nv_wr32(dev, 0x4064bc, 0x00000000);
- if (dev_priv->chipset == 0xc1 ||
- dev_priv->chipset == 0xd9) {
- nv_wr32(dev, 0x4064c0, 0x80140078);
- nv_wr32(dev, 0x4064c4, 0x0086ffff);
- }
-}
-
-static void
-nvc0_grctx_generate_tpbus(struct drm_device *dev)
-{
- nv_wr32(dev, 0x407804, 0x00000023);
- nv_wr32(dev, 0x40780c, 0x0a418820);
- nv_wr32(dev, 0x407810, 0x062080e6);
- nv_wr32(dev, 0x407814, 0x020398a4);
- nv_wr32(dev, 0x407818, 0x0e629062);
- nv_wr32(dev, 0x40781c, 0x0a418820);
- nv_wr32(dev, 0x407820, 0x000000e6);
- nv_wr32(dev, 0x4078bc, 0x00000103);
-}
-
-static void
-nvc0_grctx_generate_ccache(struct drm_device *dev)
-{
- nv_wr32(dev, 0x408000, 0x00000000);
- nv_wr32(dev, 0x408004, 0x00000000);
- nv_wr32(dev, 0x408008, 0x00000018);
- nv_wr32(dev, 0x40800c, 0x00000000);
- nv_wr32(dev, 0x408010, 0x00000000);
- nv_wr32(dev, 0x408014, 0x00000069);
- nv_wr32(dev, 0x408018, 0xe100e100);
- nv_wr32(dev, 0x408064, 0x00000000);
-}
-
-static void
-nvc0_grctx_generate_rop(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chipset = dev_priv->chipset;
-
- /* ROPC_BROADCAST */
- nv_wr32(dev, 0x408800, 0x02802a3c);
- nv_wr32(dev, 0x408804, 0x00000040);
- if (chipset == 0xd9) {
- nv_wr32(dev, 0x408808, 0x1043e005);
- nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, 0x1043e005);
- nv_wr32(dev, 0x408908, 0x00c8102f);
- } else
- if (chipset == 0xc1) {
- nv_wr32(dev, 0x408808, 0x1003e005);
- nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, 0x62000001);
- nv_wr32(dev, 0x408908, 0x00c80929);
- } else {
- nv_wr32(dev, 0x408808, 0x0003e00d);
- nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, 0x02000001);
- nv_wr32(dev, 0x408908, 0x00c80929);
- }
- nv_wr32(dev, 0x40890c, 0x00000000);
- nv_wr32(dev, 0x408980, 0x0000011d);
-}
-
-static void
-nvc0_grctx_generate_gpc(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chipset = dev_priv->chipset;
- int i;
-
- /* GPC_BROADCAST */
- nv_wr32(dev, 0x418380, 0x00000016);
- nv_wr32(dev, 0x418400, 0x38004e00);
- nv_wr32(dev, 0x418404, 0x71e0ffff);
- nv_wr32(dev, 0x418408, 0x00000000);
- nv_wr32(dev, 0x41840c, 0x00001008);
- nv_wr32(dev, 0x418410, 0x0fff0fff);
- nv_wr32(dev, 0x418414, chipset != 0xd9 ? 0x00200fff : 0x02200fff);
- nv_wr32(dev, 0x418450, 0x00000000);
- nv_wr32(dev, 0x418454, 0x00000000);
- nv_wr32(dev, 0x418458, 0x00000000);
- nv_wr32(dev, 0x41845c, 0x00000000);
- nv_wr32(dev, 0x418460, 0x00000000);
- nv_wr32(dev, 0x418464, 0x00000000);
- nv_wr32(dev, 0x418468, 0x00000001);
- nv_wr32(dev, 0x41846c, 0x00000000);
- nv_wr32(dev, 0x418470, 0x00000000);
- nv_wr32(dev, 0x418600, 0x0000001f);
- nv_wr32(dev, 0x418684, 0x0000000f);
- nv_wr32(dev, 0x418700, 0x00000002);
- nv_wr32(dev, 0x418704, 0x00000080);
- nv_wr32(dev, 0x418708, 0x00000000);
- nv_wr32(dev, 0x41870c, chipset != 0xd9 ? 0x07c80000 : 0x00000000);
- nv_wr32(dev, 0x418710, 0x00000000);
- nv_wr32(dev, 0x418800, chipset != 0xd9 ? 0x0006860a : 0x7006860a);
- nv_wr32(dev, 0x418808, 0x00000000);
- nv_wr32(dev, 0x41880c, 0x00000000);
- nv_wr32(dev, 0x418810, 0x00000000);
- nv_wr32(dev, 0x418828, 0x00008442);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x418830, 0x10000001);
- else
- nv_wr32(dev, 0x418830, 0x00000001);
- nv_wr32(dev, 0x4188d8, 0x00000008);
- nv_wr32(dev, 0x4188e0, 0x01000000);
- nv_wr32(dev, 0x4188e8, 0x00000000);
- nv_wr32(dev, 0x4188ec, 0x00000000);
- nv_wr32(dev, 0x4188f0, 0x00000000);
- nv_wr32(dev, 0x4188f4, 0x00000000);
- nv_wr32(dev, 0x4188f8, 0x00000000);
- if (chipset == 0xd9)
- nv_wr32(dev, 0x4188fc, 0x20100008);
- else if (chipset == 0xc1)
- nv_wr32(dev, 0x4188fc, 0x00100018);
- else
- nv_wr32(dev, 0x4188fc, 0x00100000);
- nv_wr32(dev, 0x41891c, 0x00ff00ff);
- nv_wr32(dev, 0x418924, 0x00000000);
- nv_wr32(dev, 0x418928, 0x00ffff00);
- nv_wr32(dev, 0x41892c, 0x0000ff00);
- for (i = 0; i < 8; i++) {
- nv_wr32(dev, 0x418a00 + (i * 0x20), 0x00000000);
- nv_wr32(dev, 0x418a04 + (i * 0x20), 0x00000000);
- nv_wr32(dev, 0x418a08 + (i * 0x20), 0x00000000);
- nv_wr32(dev, 0x418a0c + (i * 0x20), 0x00010000);
- nv_wr32(dev, 0x418a10 + (i * 0x20), 0x00000000);
- nv_wr32(dev, 0x418a14 + (i * 0x20), 0x00000000);
- nv_wr32(dev, 0x418a18 + (i * 0x20), 0x00000000);
- }
- nv_wr32(dev, 0x418b00, chipset != 0xd9 ? 0x00000000 : 0x00000006);
- nv_wr32(dev, 0x418b08, 0x0a418820);
- nv_wr32(dev, 0x418b0c, 0x062080e6);
- nv_wr32(dev, 0x418b10, 0x020398a4);
- nv_wr32(dev, 0x418b14, 0x0e629062);
- nv_wr32(dev, 0x418b18, 0x0a418820);
- nv_wr32(dev, 0x418b1c, 0x000000e6);
- nv_wr32(dev, 0x418bb8, 0x00000103);
- nv_wr32(dev, 0x418c08, 0x00000001);
- nv_wr32(dev, 0x418c10, 0x00000000);
- nv_wr32(dev, 0x418c14, 0x00000000);
- nv_wr32(dev, 0x418c18, 0x00000000);
- nv_wr32(dev, 0x418c1c, 0x00000000);
- nv_wr32(dev, 0x418c20, 0x00000000);
- nv_wr32(dev, 0x418c24, 0x00000000);
- nv_wr32(dev, 0x418c28, 0x00000000);
- nv_wr32(dev, 0x418c2c, 0x00000000);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x418c6c, 0x00000001);
- nv_wr32(dev, 0x418c80, 0x20200004);
- nv_wr32(dev, 0x418c8c, 0x00000001);
- nv_wr32(dev, 0x419000, 0x00000780);
- nv_wr32(dev, 0x419004, 0x00000000);
- nv_wr32(dev, 0x419008, 0x00000000);
- nv_wr32(dev, 0x419014, 0x00000004);
-}
-
-static void
-nvc0_grctx_generate_tp(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chipset = dev_priv->chipset;
-
- /* GPC_BROADCAST.TP_BROADCAST */
- nv_wr32(dev, 0x419818, 0x00000000);
- nv_wr32(dev, 0x41983c, 0x00038bc7);
- nv_wr32(dev, 0x419848, 0x00000000);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x419864, 0x00000129);
- else
- nv_wr32(dev, 0x419864, 0x0000012a);
- nv_wr32(dev, 0x419888, 0x00000000);
- nv_wr32(dev, 0x419a00, 0x000001f0);
- nv_wr32(dev, 0x419a04, 0x00000001);
- nv_wr32(dev, 0x419a08, 0x00000023);
- nv_wr32(dev, 0x419a0c, 0x00020000);
- nv_wr32(dev, 0x419a10, 0x00000000);
- nv_wr32(dev, 0x419a14, 0x00000200);
- nv_wr32(dev, 0x419a1c, 0x00000000);
- nv_wr32(dev, 0x419a20, 0x00000800);
- if (chipset == 0xd9)
- nv_wr32(dev, 0x00419ac4, 0x0017f440);
- else if (chipset != 0xc0 && chipset != 0xc8)
- nv_wr32(dev, 0x00419ac4, 0x0007f440);
- nv_wr32(dev, 0x419b00, 0x0a418820);
- nv_wr32(dev, 0x419b04, 0x062080e6);
- nv_wr32(dev, 0x419b08, 0x020398a4);
- nv_wr32(dev, 0x419b0c, 0x0e629062);
- nv_wr32(dev, 0x419b10, 0x0a418820);
- nv_wr32(dev, 0x419b14, 0x000000e6);
- nv_wr32(dev, 0x419bd0, 0x00900103);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x419be0, 0x00400001);
- else
- nv_wr32(dev, 0x419be0, 0x00000001);
- nv_wr32(dev, 0x419be4, 0x00000000);
- nv_wr32(dev, 0x419c00, chipset != 0xd9 ? 0x00000002 : 0x0000000a);
- nv_wr32(dev, 0x419c04, 0x00000006);
- nv_wr32(dev, 0x419c08, 0x00000002);
- nv_wr32(dev, 0x419c20, 0x00000000);
- if (dev_priv->chipset == 0xd9) {
- nv_wr32(dev, 0x419c24, 0x00084210);
- nv_wr32(dev, 0x419c28, 0x3cf3cf3c);
- nv_wr32(dev, 0x419cb0, 0x00020048);
- } else
- if (chipset == 0xce || chipset == 0xcf) {
- nv_wr32(dev, 0x419cb0, 0x00020048);
- } else {
- nv_wr32(dev, 0x419cb0, 0x00060048);
- }
- nv_wr32(dev, 0x419ce8, 0x00000000);
- nv_wr32(dev, 0x419cf4, 0x00000183);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x419d20, 0x12180000);
- else
- nv_wr32(dev, 0x419d20, 0x02180000);
- nv_wr32(dev, 0x419d24, 0x00001fff);
- if (chipset == 0xc1 || chipset == 0xd9)
- nv_wr32(dev, 0x419d44, 0x02180218);
- nv_wr32(dev, 0x419e04, 0x00000000);
- nv_wr32(dev, 0x419e08, 0x00000000);
- nv_wr32(dev, 0x419e0c, 0x00000000);
- nv_wr32(dev, 0x419e10, 0x00000002);
- nv_wr32(dev, 0x419e44, 0x001beff2);
- nv_wr32(dev, 0x419e48, 0x00000000);
- nv_wr32(dev, 0x419e4c, 0x0000000f);
- nv_wr32(dev, 0x419e50, 0x00000000);
- nv_wr32(dev, 0x419e54, 0x00000000);
- nv_wr32(dev, 0x419e58, 0x00000000);
- nv_wr32(dev, 0x419e5c, 0x00000000);
- nv_wr32(dev, 0x419e60, 0x00000000);
- nv_wr32(dev, 0x419e64, 0x00000000);
- nv_wr32(dev, 0x419e68, 0x00000000);
- nv_wr32(dev, 0x419e6c, 0x00000000);
- nv_wr32(dev, 0x419e70, 0x00000000);
- nv_wr32(dev, 0x419e74, 0x00000000);
- nv_wr32(dev, 0x419e78, 0x00000000);
- nv_wr32(dev, 0x419e7c, 0x00000000);
- nv_wr32(dev, 0x419e80, 0x00000000);
- nv_wr32(dev, 0x419e84, 0x00000000);
- nv_wr32(dev, 0x419e88, 0x00000000);
- nv_wr32(dev, 0x419e8c, 0x00000000);
- nv_wr32(dev, 0x419e90, 0x00000000);
- nv_wr32(dev, 0x419e98, 0x00000000);
- if (chipset != 0xc0 && chipset != 0xc8)
- nv_wr32(dev, 0x419ee0, 0x00011110);
- nv_wr32(dev, 0x419f50, 0x00000000);
- nv_wr32(dev, 0x419f54, 0x00000000);
- if (chipset != 0xc0 && chipset != 0xc8)
- nv_wr32(dev, 0x419f58, 0x00000000);
-}
-
-int
-nvc0_grctx_generate(struct nouveau_channel *chan)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
- struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- int i, gpc, tp, id;
- u32 fermi = nvc0_graph_class(dev);
- u32 r000260, tmp;
-
- r000260 = nv_rd32(dev, 0x000260);
- nv_wr32(dev, 0x000260, r000260 & ~1);
- nv_wr32(dev, 0x400208, 0x00000000);
-
- nvc0_grctx_generate_dispatch(dev);
- nvc0_grctx_generate_macro(dev);
- nvc0_grctx_generate_m2mf(dev);
- nvc0_grctx_generate_unk47xx(dev);
- nvc0_grctx_generate_shaders(dev);
- nvc0_grctx_generate_unk60xx(dev);
- nvc0_grctx_generate_unk64xx(dev);
- nvc0_grctx_generate_tpbus(dev);
- nvc0_grctx_generate_ccache(dev);
- nvc0_grctx_generate_rop(dev);
- nvc0_grctx_generate_gpc(dev);
- nvc0_grctx_generate_tp(dev);
-
- nv_wr32(dev, 0x404154, 0x00000000);
-
- /* fuc "mmio list" writes */
- for (i = 0; i < grch->mmio_nr * 8; i += 8) {
- u32 reg = nv_ro32(grch->mmio, i + 0);
- nv_wr32(dev, reg, nv_ro32(grch->mmio, i + 4));
- }
-
- for (tp = 0, id = 0; tp < 4; tp++) {
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- if (tp < priv->tp_nr[gpc]) {
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x698), id);
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x4e8), id);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tp * 4), id);
- nv_wr32(dev, TP_UNIT(gpc, tp, 0x088), id);
- id++;
- }
-
- nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tp_nr[gpc]);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tp_nr[gpc]);
- }
- }
-
- tmp = 0;
- for (i = 0; i < priv->gpc_nr; i++)
- tmp |= priv->tp_nr[i] << (i * 4);
- nv_wr32(dev, 0x406028, tmp);
- nv_wr32(dev, 0x405870, tmp);
-
- nv_wr32(dev, 0x40602c, 0x00000000);
- nv_wr32(dev, 0x405874, 0x00000000);
- nv_wr32(dev, 0x406030, 0x00000000);
- nv_wr32(dev, 0x405878, 0x00000000);
- nv_wr32(dev, 0x406034, 0x00000000);
- nv_wr32(dev, 0x40587c, 0x00000000);
-
- if (1) {
- u8 tpnr[GPC_MAX], data[TP_MAX];
-
- memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
- memset(data, 0x1f, sizeof(data));
-
- gpc = -1;
- for (tp = 0; tp < priv->tp_total; tp++) {
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpnr[gpc]);
- tpnr[gpc]--;
- data[tp] = gpc;
- }
-
- for (i = 0; i < 4; i++)
- nv_wr32(dev, 0x4060a8 + (i * 4), ((u32 *)data)[i]);
- }
-
- if (1) {
- u32 data[6] = {}, data2[2] = {};
- u8 tpnr[GPC_MAX];
- u8 shift, ntpcv;
-
- /* calculate first set of magics */
- memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
-
- gpc = -1;
- for (tp = 0; tp < priv->tp_total; tp++) {
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpnr[gpc]);
- tpnr[gpc]--;
-
- data[tp / 6] |= gpc << ((tp % 6) * 5);
- }
-
- for (; tp < 32; tp++)
- data[tp / 6] |= 7 << ((tp % 6) * 5);
-
- /* and the second... */
- shift = 0;
- ntpcv = priv->tp_total;
- while (!(ntpcv & (1 << 4))) {
- ntpcv <<= 1;
- shift++;
- }
-
- data2[0] = (ntpcv << 16);
- data2[0] |= (shift << 21);
- data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
- for (i = 1; i < 7; i++)
- data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
-
- /* GPC_BROADCAST */
- nv_wr32(dev, 0x418bb8, (priv->tp_total << 8) |
- priv->magic_not_rop_nr);
- for (i = 0; i < 6; i++)
- nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
-
- /* GPC_BROADCAST.TP_BROADCAST */
- nv_wr32(dev, 0x419bd0, (priv->tp_total << 8) |
- priv->magic_not_rop_nr |
- data2[0]);
- nv_wr32(dev, 0x419be4, data2[1]);
- for (i = 0; i < 6; i++)
- nv_wr32(dev, 0x419b00 + (i * 4), data[i]);
-
- /* UNK78xx */
- nv_wr32(dev, 0x4078bc, (priv->tp_total << 8) |
- priv->magic_not_rop_nr);
- for (i = 0; i < 6; i++)
- nv_wr32(dev, 0x40780c + (i * 4), data[i]);
- }
-
- if (1) {
- u32 tp_mask = 0, tp_set = 0;
- u8 tpnr[GPC_MAX], a, b;
-
- memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
- for (gpc = 0; gpc < priv->gpc_nr; gpc++)
- tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
-
- for (i = 0, gpc = -1, b = -1; i < 32; i++) {
- a = (i * (priv->tp_total - 1)) / 32;
- if (a != b) {
- b = a;
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpnr[gpc]);
- tp = priv->tp_nr[gpc] - tpnr[gpc]--;
-
- tp_set |= 1 << ((gpc * 8) + tp);
- }
-
- nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
- nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
- }
- }
-
- nv_wr32(dev, 0x400208, 0x80000000);
-
- nv_icmd(dev, 0x00001000, 0x00000004);
- nv_icmd(dev, 0x000000a9, 0x0000ffff);
- nv_icmd(dev, 0x00000038, 0x0fac6881);
- nv_icmd(dev, 0x0000003d, 0x00000001);
- nv_icmd(dev, 0x000000e8, 0x00000400);
- nv_icmd(dev, 0x000000e9, 0x00000400);
- nv_icmd(dev, 0x000000ea, 0x00000400);
- nv_icmd(dev, 0x000000eb, 0x00000400);
- nv_icmd(dev, 0x000000ec, 0x00000400);
- nv_icmd(dev, 0x000000ed, 0x00000400);
- nv_icmd(dev, 0x000000ee, 0x00000400);
- nv_icmd(dev, 0x000000ef, 0x00000400);
- nv_icmd(dev, 0x00000078, 0x00000300);
- nv_icmd(dev, 0x00000079, 0x00000300);
- nv_icmd(dev, 0x0000007a, 0x00000300);
- nv_icmd(dev, 0x0000007b, 0x00000300);
- nv_icmd(dev, 0x0000007c, 0x00000300);
- nv_icmd(dev, 0x0000007d, 0x00000300);
- nv_icmd(dev, 0x0000007e, 0x00000300);
- nv_icmd(dev, 0x0000007f, 0x00000300);
- nv_icmd(dev, 0x00000050, 0x00000011);
- nv_icmd(dev, 0x00000058, 0x00000008);
- nv_icmd(dev, 0x00000059, 0x00000008);
- nv_icmd(dev, 0x0000005a, 0x00000008);
- nv_icmd(dev, 0x0000005b, 0x00000008);
- nv_icmd(dev, 0x0000005c, 0x00000008);
- nv_icmd(dev, 0x0000005d, 0x00000008);
- nv_icmd(dev, 0x0000005e, 0x00000008);
- nv_icmd(dev, 0x0000005f, 0x00000008);
- nv_icmd(dev, 0x00000208, 0x00000001);
- nv_icmd(dev, 0x00000209, 0x00000001);
- nv_icmd(dev, 0x0000020a, 0x00000001);
- nv_icmd(dev, 0x0000020b, 0x00000001);
- nv_icmd(dev, 0x0000020c, 0x00000001);
- nv_icmd(dev, 0x0000020d, 0x00000001);
- nv_icmd(dev, 0x0000020e, 0x00000001);
- nv_icmd(dev, 0x0000020f, 0x00000001);
- nv_icmd(dev, 0x00000081, 0x00000001);
- nv_icmd(dev, 0x00000085, 0x00000004);
- nv_icmd(dev, 0x00000088, 0x00000400);
- nv_icmd(dev, 0x00000090, 0x00000300);
- nv_icmd(dev, 0x00000098, 0x00001001);
- nv_icmd(dev, 0x000000e3, 0x00000001);
- nv_icmd(dev, 0x000000da, 0x00000001);
- nv_icmd(dev, 0x000000f8, 0x00000003);
- nv_icmd(dev, 0x000000fa, 0x00000001);
- nv_icmd(dev, 0x0000009f, 0x0000ffff);
- nv_icmd(dev, 0x000000a0, 0x0000ffff);
- nv_icmd(dev, 0x000000a1, 0x0000ffff);
- nv_icmd(dev, 0x000000a2, 0x0000ffff);
- nv_icmd(dev, 0x000000b1, 0x00000001);
- nv_icmd(dev, 0x000000b2, 0x00000000);
- nv_icmd(dev, 0x000000b3, 0x00000000);
- nv_icmd(dev, 0x000000b4, 0x00000000);
- nv_icmd(dev, 0x000000b5, 0x00000000);
- nv_icmd(dev, 0x000000b6, 0x00000000);
- nv_icmd(dev, 0x000000b7, 0x00000000);
- nv_icmd(dev, 0x000000b8, 0x00000000);
- nv_icmd(dev, 0x000000b9, 0x00000000);
- nv_icmd(dev, 0x000000ba, 0x00000000);
- nv_icmd(dev, 0x000000bb, 0x00000000);
- nv_icmd(dev, 0x000000bc, 0x00000000);
- nv_icmd(dev, 0x000000bd, 0x00000000);
- nv_icmd(dev, 0x000000be, 0x00000000);
- nv_icmd(dev, 0x000000bf, 0x00000000);
- nv_icmd(dev, 0x000000c0, 0x00000000);
- nv_icmd(dev, 0x000000c1, 0x00000000);
- nv_icmd(dev, 0x000000c2, 0x00000000);
- nv_icmd(dev, 0x000000c3, 0x00000000);
- nv_icmd(dev, 0x000000c4, 0x00000000);
- nv_icmd(dev, 0x000000c5, 0x00000000);
- nv_icmd(dev, 0x000000c6, 0x00000000);
- nv_icmd(dev, 0x000000c7, 0x00000000);
- nv_icmd(dev, 0x000000c8, 0x00000000);
- nv_icmd(dev, 0x000000c9, 0x00000000);
- nv_icmd(dev, 0x000000ca, 0x00000000);
- nv_icmd(dev, 0x000000cb, 0x00000000);
- nv_icmd(dev, 0x000000cc, 0x00000000);
- nv_icmd(dev, 0x000000cd, 0x00000000);
- nv_icmd(dev, 0x000000ce, 0x00000000);
- nv_icmd(dev, 0x000000cf, 0x00000000);
- nv_icmd(dev, 0x000000d0, 0x00000000);
- nv_icmd(dev, 0x000000d1, 0x00000000);
- nv_icmd(dev, 0x000000d2, 0x00000000);
- nv_icmd(dev, 0x000000d3, 0x00000000);
- nv_icmd(dev, 0x000000d4, 0x00000000);
- nv_icmd(dev, 0x000000d5, 0x00000000);
- nv_icmd(dev, 0x000000d6, 0x00000000);
- nv_icmd(dev, 0x000000d7, 0x00000000);
- nv_icmd(dev, 0x000000d8, 0x00000000);
- nv_icmd(dev, 0x000000d9, 0x00000000);
- nv_icmd(dev, 0x00000210, 0x00000040);
- nv_icmd(dev, 0x00000211, 0x00000040);
- nv_icmd(dev, 0x00000212, 0x00000040);
- nv_icmd(dev, 0x00000213, 0x00000040);
- nv_icmd(dev, 0x00000214, 0x00000040);
- nv_icmd(dev, 0x00000215, 0x00000040);
- nv_icmd(dev, 0x00000216, 0x00000040);
- nv_icmd(dev, 0x00000217, 0x00000040);
- if (dev_priv->chipset == 0xd9) {
- for (i = 0x0400; i <= 0x0417; i++)
- nv_icmd(dev, i, 0x00000040);
- }
- nv_icmd(dev, 0x00000218, 0x0000c080);
- nv_icmd(dev, 0x00000219, 0x0000c080);
- nv_icmd(dev, 0x0000021a, 0x0000c080);
- nv_icmd(dev, 0x0000021b, 0x0000c080);
- nv_icmd(dev, 0x0000021c, 0x0000c080);
- nv_icmd(dev, 0x0000021d, 0x0000c080);
- nv_icmd(dev, 0x0000021e, 0x0000c080);
- nv_icmd(dev, 0x0000021f, 0x0000c080);
- if (dev_priv->chipset == 0xd9) {
- for (i = 0x0440; i <= 0x0457; i++)
- nv_icmd(dev, i, 0x0000c080);
- }
- nv_icmd(dev, 0x000000ad, 0x0000013e);
- nv_icmd(dev, 0x000000e1, 0x00000010);
- nv_icmd(dev, 0x00000290, 0x00000000);
- nv_icmd(dev, 0x00000291, 0x00000000);
- nv_icmd(dev, 0x00000292, 0x00000000);
- nv_icmd(dev, 0x00000293, 0x00000000);
- nv_icmd(dev, 0x00000294, 0x00000000);
- nv_icmd(dev, 0x00000295, 0x00000000);
- nv_icmd(dev, 0x00000296, 0x00000000);
- nv_icmd(dev, 0x00000297, 0x00000000);
- nv_icmd(dev, 0x00000298, 0x00000000);
- nv_icmd(dev, 0x00000299, 0x00000000);
- nv_icmd(dev, 0x0000029a, 0x00000000);
- nv_icmd(dev, 0x0000029b, 0x00000000);
- nv_icmd(dev, 0x0000029c, 0x00000000);
- nv_icmd(dev, 0x0000029d, 0x00000000);
- nv_icmd(dev, 0x0000029e, 0x00000000);
- nv_icmd(dev, 0x0000029f, 0x00000000);
- nv_icmd(dev, 0x000003b0, 0x00000000);
- nv_icmd(dev, 0x000003b1, 0x00000000);
- nv_icmd(dev, 0x000003b2, 0x00000000);
- nv_icmd(dev, 0x000003b3, 0x00000000);
- nv_icmd(dev, 0x000003b4, 0x00000000);
- nv_icmd(dev, 0x000003b5, 0x00000000);
- nv_icmd(dev, 0x000003b6, 0x00000000);
- nv_icmd(dev, 0x000003b7, 0x00000000);
- nv_icmd(dev, 0x000003b8, 0x00000000);
- nv_icmd(dev, 0x000003b9, 0x00000000);
- nv_icmd(dev, 0x000003ba, 0x00000000);
- nv_icmd(dev, 0x000003bb, 0x00000000);
- nv_icmd(dev, 0x000003bc, 0x00000000);
- nv_icmd(dev, 0x000003bd, 0x00000000);
- nv_icmd(dev, 0x000003be, 0x00000000);
- nv_icmd(dev, 0x000003bf, 0x00000000);
- nv_icmd(dev, 0x000002a0, 0x00000000);
- nv_icmd(dev, 0x000002a1, 0x00000000);
- nv_icmd(dev, 0x000002a2, 0x00000000);
- nv_icmd(dev, 0x000002a3, 0x00000000);
- nv_icmd(dev, 0x000002a4, 0x00000000);
- nv_icmd(dev, 0x000002a5, 0x00000000);
- nv_icmd(dev, 0x000002a6, 0x00000000);
- nv_icmd(dev, 0x000002a7, 0x00000000);
- nv_icmd(dev, 0x000002a8, 0x00000000);
- nv_icmd(dev, 0x000002a9, 0x00000000);
- nv_icmd(dev, 0x000002aa, 0x00000000);
- nv_icmd(dev, 0x000002ab, 0x00000000);
- nv_icmd(dev, 0x000002ac, 0x00000000);
- nv_icmd(dev, 0x000002ad, 0x00000000);
- nv_icmd(dev, 0x000002ae, 0x00000000);
- nv_icmd(dev, 0x000002af, 0x00000000);
- nv_icmd(dev, 0x00000420, 0x00000000);
- nv_icmd(dev, 0x00000421, 0x00000000);
- nv_icmd(dev, 0x00000422, 0x00000000);
- nv_icmd(dev, 0x00000423, 0x00000000);
- nv_icmd(dev, 0x00000424, 0x00000000);
- nv_icmd(dev, 0x00000425, 0x00000000);
- nv_icmd(dev, 0x00000426, 0x00000000);
- nv_icmd(dev, 0x00000427, 0x00000000);
- nv_icmd(dev, 0x00000428, 0x00000000);
- nv_icmd(dev, 0x00000429, 0x00000000);
- nv_icmd(dev, 0x0000042a, 0x00000000);
- nv_icmd(dev, 0x0000042b, 0x00000000);
- nv_icmd(dev, 0x0000042c, 0x00000000);
- nv_icmd(dev, 0x0000042d, 0x00000000);
- nv_icmd(dev, 0x0000042e, 0x00000000);
- nv_icmd(dev, 0x0000042f, 0x00000000);
- nv_icmd(dev, 0x000002b0, 0x00000000);
- nv_icmd(dev, 0x000002b1, 0x00000000);
- nv_icmd(dev, 0x000002b2, 0x00000000);
- nv_icmd(dev, 0x000002b3, 0x00000000);
- nv_icmd(dev, 0x000002b4, 0x00000000);
- nv_icmd(dev, 0x000002b5, 0x00000000);
- nv_icmd(dev, 0x000002b6, 0x00000000);
- nv_icmd(dev, 0x000002b7, 0x00000000);
- nv_icmd(dev, 0x000002b8, 0x00000000);
- nv_icmd(dev, 0x000002b9, 0x00000000);
- nv_icmd(dev, 0x000002ba, 0x00000000);
- nv_icmd(dev, 0x000002bb, 0x00000000);
- nv_icmd(dev, 0x000002bc, 0x00000000);
- nv_icmd(dev, 0x000002bd, 0x00000000);
- nv_icmd(dev, 0x000002be, 0x00000000);
- nv_icmd(dev, 0x000002bf, 0x00000000);
- nv_icmd(dev, 0x00000430, 0x00000000);
- nv_icmd(dev, 0x00000431, 0x00000000);
- nv_icmd(dev, 0x00000432, 0x00000000);
- nv_icmd(dev, 0x00000433, 0x00000000);
- nv_icmd(dev, 0x00000434, 0x00000000);
- nv_icmd(dev, 0x00000435, 0x00000000);
- nv_icmd(dev, 0x00000436, 0x00000000);
- nv_icmd(dev, 0x00000437, 0x00000000);
- nv_icmd(dev, 0x00000438, 0x00000000);
- nv_icmd(dev, 0x00000439, 0x00000000);
- nv_icmd(dev, 0x0000043a, 0x00000000);
- nv_icmd(dev, 0x0000043b, 0x00000000);
- nv_icmd(dev, 0x0000043c, 0x00000000);
- nv_icmd(dev, 0x0000043d, 0x00000000);
- nv_icmd(dev, 0x0000043e, 0x00000000);
- nv_icmd(dev, 0x0000043f, 0x00000000);
- nv_icmd(dev, 0x000002c0, 0x00000000);
- nv_icmd(dev, 0x000002c1, 0x00000000);
- nv_icmd(dev, 0x000002c2, 0x00000000);
- nv_icmd(dev, 0x000002c3, 0x00000000);
- nv_icmd(dev, 0x000002c4, 0x00000000);
- nv_icmd(dev, 0x000002c5, 0x00000000);
- nv_icmd(dev, 0x000002c6, 0x00000000);
- nv_icmd(dev, 0x000002c7, 0x00000000);
- nv_icmd(dev, 0x000002c8, 0x00000000);
- nv_icmd(dev, 0x000002c9, 0x00000000);
- nv_icmd(dev, 0x000002ca, 0x00000000);
- nv_icmd(dev, 0x000002cb, 0x00000000);
- nv_icmd(dev, 0x000002cc, 0x00000000);
- nv_icmd(dev, 0x000002cd, 0x00000000);
- nv_icmd(dev, 0x000002ce, 0x00000000);
- nv_icmd(dev, 0x000002cf, 0x00000000);
- nv_icmd(dev, 0x000004d0, 0x00000000);
- nv_icmd(dev, 0x000004d1, 0x00000000);
- nv_icmd(dev, 0x000004d2, 0x00000000);
- nv_icmd(dev, 0x000004d3, 0x00000000);
- nv_icmd(dev, 0x000004d4, 0x00000000);
- nv_icmd(dev, 0x000004d5, 0x00000000);
- nv_icmd(dev, 0x000004d6, 0x00000000);
- nv_icmd(dev, 0x000004d7, 0x00000000);
- nv_icmd(dev, 0x000004d8, 0x00000000);
- nv_icmd(dev, 0x000004d9, 0x00000000);
- nv_icmd(dev, 0x000004da, 0x00000000);
- nv_icmd(dev, 0x000004db, 0x00000000);
- nv_icmd(dev, 0x000004dc, 0x00000000);
- nv_icmd(dev, 0x000004dd, 0x00000000);
- nv_icmd(dev, 0x000004de, 0x00000000);
- nv_icmd(dev, 0x000004df, 0x00000000);
- nv_icmd(dev, 0x00000720, 0x00000000);
- nv_icmd(dev, 0x00000721, 0x00000000);
- nv_icmd(dev, 0x00000722, 0x00000000);
- nv_icmd(dev, 0x00000723, 0x00000000);
- nv_icmd(dev, 0x00000724, 0x00000000);
- nv_icmd(dev, 0x00000725, 0x00000000);
- nv_icmd(dev, 0x00000726, 0x00000000);
- nv_icmd(dev, 0x00000727, 0x00000000);
- nv_icmd(dev, 0x00000728, 0x00000000);
- nv_icmd(dev, 0x00000729, 0x00000000);
- nv_icmd(dev, 0x0000072a, 0x00000000);
- nv_icmd(dev, 0x0000072b, 0x00000000);
- nv_icmd(dev, 0x0000072c, 0x00000000);
- nv_icmd(dev, 0x0000072d, 0x00000000);
- nv_icmd(dev, 0x0000072e, 0x00000000);
- nv_icmd(dev, 0x0000072f, 0x00000000);
- nv_icmd(dev, 0x000008c0, 0x00000000);
- nv_icmd(dev, 0x000008c1, 0x00000000);
- nv_icmd(dev, 0x000008c2, 0x00000000);
- nv_icmd(dev, 0x000008c3, 0x00000000);
- nv_icmd(dev, 0x000008c4, 0x00000000);
- nv_icmd(dev, 0x000008c5, 0x00000000);
- nv_icmd(dev, 0x000008c6, 0x00000000);
- nv_icmd(dev, 0x000008c7, 0x00000000);
- nv_icmd(dev, 0x000008c8, 0x00000000);
- nv_icmd(dev, 0x000008c9, 0x00000000);
- nv_icmd(dev, 0x000008ca, 0x00000000);
- nv_icmd(dev, 0x000008cb, 0x00000000);
- nv_icmd(dev, 0x000008cc, 0x00000000);
- nv_icmd(dev, 0x000008cd, 0x00000000);
- nv_icmd(dev, 0x000008ce, 0x00000000);
- nv_icmd(dev, 0x000008cf, 0x00000000);
- nv_icmd(dev, 0x00000890, 0x00000000);
- nv_icmd(dev, 0x00000891, 0x00000000);
- nv_icmd(dev, 0x00000892, 0x00000000);
- nv_icmd(dev, 0x00000893, 0x00000000);
- nv_icmd(dev, 0x00000894, 0x00000000);
- nv_icmd(dev, 0x00000895, 0x00000000);
- nv_icmd(dev, 0x00000896, 0x00000000);
- nv_icmd(dev, 0x00000897, 0x00000000);
- nv_icmd(dev, 0x00000898, 0x00000000);
- nv_icmd(dev, 0x00000899, 0x00000000);
- nv_icmd(dev, 0x0000089a, 0x00000000);
- nv_icmd(dev, 0x0000089b, 0x00000000);
- nv_icmd(dev, 0x0000089c, 0x00000000);
- nv_icmd(dev, 0x0000089d, 0x00000000);
- nv_icmd(dev, 0x0000089e, 0x00000000);
- nv_icmd(dev, 0x0000089f, 0x00000000);
- nv_icmd(dev, 0x000008e0, 0x00000000);
- nv_icmd(dev, 0x000008e1, 0x00000000);
- nv_icmd(dev, 0x000008e2, 0x00000000);
- nv_icmd(dev, 0x000008e3, 0x00000000);
- nv_icmd(dev, 0x000008e4, 0x00000000);
- nv_icmd(dev, 0x000008e5, 0x00000000);
- nv_icmd(dev, 0x000008e6, 0x00000000);
- nv_icmd(dev, 0x000008e7, 0x00000000);
- nv_icmd(dev, 0x000008e8, 0x00000000);
- nv_icmd(dev, 0x000008e9, 0x00000000);
- nv_icmd(dev, 0x000008ea, 0x00000000);
- nv_icmd(dev, 0x000008eb, 0x00000000);
- nv_icmd(dev, 0x000008ec, 0x00000000);
- nv_icmd(dev, 0x000008ed, 0x00000000);
- nv_icmd(dev, 0x000008ee, 0x00000000);
- nv_icmd(dev, 0x000008ef, 0x00000000);
- nv_icmd(dev, 0x000008a0, 0x00000000);
- nv_icmd(dev, 0x000008a1, 0x00000000);
- nv_icmd(dev, 0x000008a2, 0x00000000);
- nv_icmd(dev, 0x000008a3, 0x00000000);
- nv_icmd(dev, 0x000008a4, 0x00000000);
- nv_icmd(dev, 0x000008a5, 0x00000000);
- nv_icmd(dev, 0x000008a6, 0x00000000);
- nv_icmd(dev, 0x000008a7, 0x00000000);
- nv_icmd(dev, 0x000008a8, 0x00000000);
- nv_icmd(dev, 0x000008a9, 0x00000000);
- nv_icmd(dev, 0x000008aa, 0x00000000);
- nv_icmd(dev, 0x000008ab, 0x00000000);
- nv_icmd(dev, 0x000008ac, 0x00000000);
- nv_icmd(dev, 0x000008ad, 0x00000000);
- nv_icmd(dev, 0x000008ae, 0x00000000);
- nv_icmd(dev, 0x000008af, 0x00000000);
- nv_icmd(dev, 0x000008f0, 0x00000000);
- nv_icmd(dev, 0x000008f1, 0x00000000);
- nv_icmd(dev, 0x000008f2, 0x00000000);
- nv_icmd(dev, 0x000008f3, 0x00000000);
- nv_icmd(dev, 0x000008f4, 0x00000000);
- nv_icmd(dev, 0x000008f5, 0x00000000);
- nv_icmd(dev, 0x000008f6, 0x00000000);
- nv_icmd(dev, 0x000008f7, 0x00000000);
- nv_icmd(dev, 0x000008f8, 0x00000000);
- nv_icmd(dev, 0x000008f9, 0x00000000);
- nv_icmd(dev, 0x000008fa, 0x00000000);
- nv_icmd(dev, 0x000008fb, 0x00000000);
- nv_icmd(dev, 0x000008fc, 0x00000000);
- nv_icmd(dev, 0x000008fd, 0x00000000);
- nv_icmd(dev, 0x000008fe, 0x00000000);
- nv_icmd(dev, 0x000008ff, 0x00000000);
- nv_icmd(dev, 0x0000094c, 0x000000ff);
- nv_icmd(dev, 0x0000094d, 0xffffffff);
- nv_icmd(dev, 0x0000094e, 0x00000002);
- nv_icmd(dev, 0x000002ec, 0x00000001);
- nv_icmd(dev, 0x00000303, 0x00000001);
- nv_icmd(dev, 0x000002e6, 0x00000001);
- nv_icmd(dev, 0x00000466, 0x00000052);
- nv_icmd(dev, 0x00000301, 0x3f800000);
- nv_icmd(dev, 0x00000304, 0x30201000);
- nv_icmd(dev, 0x00000305, 0x70605040);
- nv_icmd(dev, 0x00000306, 0xb8a89888);
- nv_icmd(dev, 0x00000307, 0xf8e8d8c8);
- nv_icmd(dev, 0x0000030a, 0x00ffff00);
- nv_icmd(dev, 0x0000030b, 0x0000001a);
- nv_icmd(dev, 0x0000030c, 0x00000001);
- nv_icmd(dev, 0x00000318, 0x00000001);
- nv_icmd(dev, 0x00000340, 0x00000000);
- nv_icmd(dev, 0x00000375, 0x00000001);
- nv_icmd(dev, 0x00000351, 0x00000100);
- nv_icmd(dev, 0x0000037d, 0x00000006);
- nv_icmd(dev, 0x000003a0, 0x00000002);
- nv_icmd(dev, 0x000003aa, 0x00000001);
- nv_icmd(dev, 0x000003a9, 0x00000001);
- nv_icmd(dev, 0x00000380, 0x00000001);
- nv_icmd(dev, 0x00000360, 0x00000040);
- nv_icmd(dev, 0x00000366, 0x00000000);
- nv_icmd(dev, 0x00000367, 0x00000000);
- nv_icmd(dev, 0x00000368, 0x00001fff);
- nv_icmd(dev, 0x00000370, 0x00000000);
- nv_icmd(dev, 0x00000371, 0x00000000);
- nv_icmd(dev, 0x00000372, 0x003fffff);
- nv_icmd(dev, 0x0000037a, 0x00000012);
- nv_icmd(dev, 0x000005e0, 0x00000022);
- nv_icmd(dev, 0x000005e1, 0x00000022);
- nv_icmd(dev, 0x000005e2, 0x00000022);
- nv_icmd(dev, 0x000005e3, 0x00000022);
- nv_icmd(dev, 0x000005e4, 0x00000022);
- nv_icmd(dev, 0x00000619, 0x00000003);
- nv_icmd(dev, 0x00000811, 0x00000003);
- nv_icmd(dev, 0x00000812, 0x00000004);
- nv_icmd(dev, 0x00000813, 0x00000006);
- nv_icmd(dev, 0x00000814, 0x00000008);
- nv_icmd(dev, 0x00000815, 0x0000000b);
- nv_icmd(dev, 0x00000800, 0x00000001);
- nv_icmd(dev, 0x00000801, 0x00000001);
- nv_icmd(dev, 0x00000802, 0x00000001);
- nv_icmd(dev, 0x00000803, 0x00000001);
- nv_icmd(dev, 0x00000804, 0x00000001);
- nv_icmd(dev, 0x00000805, 0x00000001);
- nv_icmd(dev, 0x00000632, 0x00000001);
- nv_icmd(dev, 0x00000633, 0x00000002);
- nv_icmd(dev, 0x00000634, 0x00000003);
- nv_icmd(dev, 0x00000635, 0x00000004);
- nv_icmd(dev, 0x00000654, 0x3f800000);
- nv_icmd(dev, 0x00000657, 0x3f800000);
- nv_icmd(dev, 0x00000655, 0x3f800000);
- nv_icmd(dev, 0x00000656, 0x3f800000);
- nv_icmd(dev, 0x000006cd, 0x3f800000);
- nv_icmd(dev, 0x000007f5, 0x3f800000);
- nv_icmd(dev, 0x000007dc, 0x39291909);
- nv_icmd(dev, 0x000007dd, 0x79695949);
- nv_icmd(dev, 0x000007de, 0xb9a99989);
- nv_icmd(dev, 0x000007df, 0xf9e9d9c9);
- nv_icmd(dev, 0x000007e8, 0x00003210);
- nv_icmd(dev, 0x000007e9, 0x00007654);
- nv_icmd(dev, 0x000007ea, 0x00000098);
- nv_icmd(dev, 0x000007ec, 0x39291909);
- nv_icmd(dev, 0x000007ed, 0x79695949);
- nv_icmd(dev, 0x000007ee, 0xb9a99989);
- nv_icmd(dev, 0x000007ef, 0xf9e9d9c9);
- nv_icmd(dev, 0x000007f0, 0x00003210);
- nv_icmd(dev, 0x000007f1, 0x00007654);
- nv_icmd(dev, 0x000007f2, 0x00000098);
- nv_icmd(dev, 0x000005a5, 0x00000001);
- nv_icmd(dev, 0x00000980, 0x00000000);
- nv_icmd(dev, 0x00000981, 0x00000000);
- nv_icmd(dev, 0x00000982, 0x00000000);
- nv_icmd(dev, 0x00000983, 0x00000000);
- nv_icmd(dev, 0x00000984, 0x00000000);
- nv_icmd(dev, 0x00000985, 0x00000000);
- nv_icmd(dev, 0x00000986, 0x00000000);
- nv_icmd(dev, 0x00000987, 0x00000000);
- nv_icmd(dev, 0x00000988, 0x00000000);
- nv_icmd(dev, 0x00000989, 0x00000000);
- nv_icmd(dev, 0x0000098a, 0x00000000);
- nv_icmd(dev, 0x0000098b, 0x00000000);
- nv_icmd(dev, 0x0000098c, 0x00000000);
- nv_icmd(dev, 0x0000098d, 0x00000000);
- nv_icmd(dev, 0x0000098e, 0x00000000);
- nv_icmd(dev, 0x0000098f, 0x00000000);
- nv_icmd(dev, 0x00000990, 0x00000000);
- nv_icmd(dev, 0x00000991, 0x00000000);
- nv_icmd(dev, 0x00000992, 0x00000000);
- nv_icmd(dev, 0x00000993, 0x00000000);
- nv_icmd(dev, 0x00000994, 0x00000000);
- nv_icmd(dev, 0x00000995, 0x00000000);
- nv_icmd(dev, 0x00000996, 0x00000000);
- nv_icmd(dev, 0x00000997, 0x00000000);
- nv_icmd(dev, 0x00000998, 0x00000000);
- nv_icmd(dev, 0x00000999, 0x00000000);
- nv_icmd(dev, 0x0000099a, 0x00000000);
- nv_icmd(dev, 0x0000099b, 0x00000000);
- nv_icmd(dev, 0x0000099c, 0x00000000);
- nv_icmd(dev, 0x0000099d, 0x00000000);
- nv_icmd(dev, 0x0000099e, 0x00000000);
- nv_icmd(dev, 0x0000099f, 0x00000000);
- nv_icmd(dev, 0x000009a0, 0x00000000);
- nv_icmd(dev, 0x000009a1, 0x00000000);
- nv_icmd(dev, 0x000009a2, 0x00000000);
- nv_icmd(dev, 0x000009a3, 0x00000000);
- nv_icmd(dev, 0x000009a4, 0x00000000);
- nv_icmd(dev, 0x000009a5, 0x00000000);
- nv_icmd(dev, 0x000009a6, 0x00000000);
- nv_icmd(dev, 0x000009a7, 0x00000000);
- nv_icmd(dev, 0x000009a8, 0x00000000);
- nv_icmd(dev, 0x000009a9, 0x00000000);
- nv_icmd(dev, 0x000009aa, 0x00000000);
- nv_icmd(dev, 0x000009ab, 0x00000000);
- nv_icmd(dev, 0x000009ac, 0x00000000);
- nv_icmd(dev, 0x000009ad, 0x00000000);
- nv_icmd(dev, 0x000009ae, 0x00000000);
- nv_icmd(dev, 0x000009af, 0x00000000);
- nv_icmd(dev, 0x000009b0, 0x00000000);
- nv_icmd(dev, 0x000009b1, 0x00000000);
- nv_icmd(dev, 0x000009b2, 0x00000000);
- nv_icmd(dev, 0x000009b3, 0x00000000);
- nv_icmd(dev, 0x000009b4, 0x00000000);
- nv_icmd(dev, 0x000009b5, 0x00000000);
- nv_icmd(dev, 0x000009b6, 0x00000000);
- nv_icmd(dev, 0x000009b7, 0x00000000);
- nv_icmd(dev, 0x000009b8, 0x00000000);
- nv_icmd(dev, 0x000009b9, 0x00000000);
- nv_icmd(dev, 0x000009ba, 0x00000000);
- nv_icmd(dev, 0x000009bb, 0x00000000);
- nv_icmd(dev, 0x000009bc, 0x00000000);
- nv_icmd(dev, 0x000009bd, 0x00000000);
- nv_icmd(dev, 0x000009be, 0x00000000);
- nv_icmd(dev, 0x000009bf, 0x00000000);
- nv_icmd(dev, 0x000009c0, 0x00000000);
- nv_icmd(dev, 0x000009c1, 0x00000000);
- nv_icmd(dev, 0x000009c2, 0x00000000);
- nv_icmd(dev, 0x000009c3, 0x00000000);
- nv_icmd(dev, 0x000009c4, 0x00000000);
- nv_icmd(dev, 0x000009c5, 0x00000000);
- nv_icmd(dev, 0x000009c6, 0x00000000);
- nv_icmd(dev, 0x000009c7, 0x00000000);
- nv_icmd(dev, 0x000009c8, 0x00000000);
- nv_icmd(dev, 0x000009c9, 0x00000000);
- nv_icmd(dev, 0x000009ca, 0x00000000);
- nv_icmd(dev, 0x000009cb, 0x00000000);
- nv_icmd(dev, 0x000009cc, 0x00000000);
- nv_icmd(dev, 0x000009cd, 0x00000000);
- nv_icmd(dev, 0x000009ce, 0x00000000);
- nv_icmd(dev, 0x000009cf, 0x00000000);
- nv_icmd(dev, 0x000009d0, 0x00000000);
- nv_icmd(dev, 0x000009d1, 0x00000000);
- nv_icmd(dev, 0x000009d2, 0x00000000);
- nv_icmd(dev, 0x000009d3, 0x00000000);
- nv_icmd(dev, 0x000009d4, 0x00000000);
- nv_icmd(dev, 0x000009d5, 0x00000000);
- nv_icmd(dev, 0x000009d6, 0x00000000);
- nv_icmd(dev, 0x000009d7, 0x00000000);
- nv_icmd(dev, 0x000009d8, 0x00000000);
- nv_icmd(dev, 0x000009d9, 0x00000000);
- nv_icmd(dev, 0x000009da, 0x00000000);
- nv_icmd(dev, 0x000009db, 0x00000000);
- nv_icmd(dev, 0x000009dc, 0x00000000);
- nv_icmd(dev, 0x000009dd, 0x00000000);
- nv_icmd(dev, 0x000009de, 0x00000000);
- nv_icmd(dev, 0x000009df, 0x00000000);
- nv_icmd(dev, 0x000009e0, 0x00000000);
- nv_icmd(dev, 0x000009e1, 0x00000000);
- nv_icmd(dev, 0x000009e2, 0x00000000);
- nv_icmd(dev, 0x000009e3, 0x00000000);
- nv_icmd(dev, 0x000009e4, 0x00000000);
- nv_icmd(dev, 0x000009e5, 0x00000000);
- nv_icmd(dev, 0x000009e6, 0x00000000);
- nv_icmd(dev, 0x000009e7, 0x00000000);
- nv_icmd(dev, 0x000009e8, 0x00000000);
- nv_icmd(dev, 0x000009e9, 0x00000000);
- nv_icmd(dev, 0x000009ea, 0x00000000);
- nv_icmd(dev, 0x000009eb, 0x00000000);
- nv_icmd(dev, 0x000009ec, 0x00000000);
- nv_icmd(dev, 0x000009ed, 0x00000000);
- nv_icmd(dev, 0x000009ee, 0x00000000);
- nv_icmd(dev, 0x000009ef, 0x00000000);
- nv_icmd(dev, 0x000009f0, 0x00000000);
- nv_icmd(dev, 0x000009f1, 0x00000000);
- nv_icmd(dev, 0x000009f2, 0x00000000);
- nv_icmd(dev, 0x000009f3, 0x00000000);
- nv_icmd(dev, 0x000009f4, 0x00000000);
- nv_icmd(dev, 0x000009f5, 0x00000000);
- nv_icmd(dev, 0x000009f6, 0x00000000);
- nv_icmd(dev, 0x000009f7, 0x00000000);
- nv_icmd(dev, 0x000009f8, 0x00000000);
- nv_icmd(dev, 0x000009f9, 0x00000000);
- nv_icmd(dev, 0x000009fa, 0x00000000);
- nv_icmd(dev, 0x000009fb, 0x00000000);
- nv_icmd(dev, 0x000009fc, 0x00000000);
- nv_icmd(dev, 0x000009fd, 0x00000000);
- nv_icmd(dev, 0x000009fe, 0x00000000);
- nv_icmd(dev, 0x000009ff, 0x00000000);
- nv_icmd(dev, 0x00000468, 0x00000004);
- nv_icmd(dev, 0x0000046c, 0x00000001);
- nv_icmd(dev, 0x00000470, 0x00000000);
- nv_icmd(dev, 0x00000471, 0x00000000);
- nv_icmd(dev, 0x00000472, 0x00000000);
- nv_icmd(dev, 0x00000473, 0x00000000);
- nv_icmd(dev, 0x00000474, 0x00000000);
- nv_icmd(dev, 0x00000475, 0x00000000);
- nv_icmd(dev, 0x00000476, 0x00000000);
- nv_icmd(dev, 0x00000477, 0x00000000);
- nv_icmd(dev, 0x00000478, 0x00000000);
- nv_icmd(dev, 0x00000479, 0x00000000);
- nv_icmd(dev, 0x0000047a, 0x00000000);
- nv_icmd(dev, 0x0000047b, 0x00000000);
- nv_icmd(dev, 0x0000047c, 0x00000000);
- nv_icmd(dev, 0x0000047d, 0x00000000);
- nv_icmd(dev, 0x0000047e, 0x00000000);
- nv_icmd(dev, 0x0000047f, 0x00000000);
- nv_icmd(dev, 0x00000480, 0x00000000);
- nv_icmd(dev, 0x00000481, 0x00000000);
- nv_icmd(dev, 0x00000482, 0x00000000);
- nv_icmd(dev, 0x00000483, 0x00000000);
- nv_icmd(dev, 0x00000484, 0x00000000);
- nv_icmd(dev, 0x00000485, 0x00000000);
- nv_icmd(dev, 0x00000486, 0x00000000);
- nv_icmd(dev, 0x00000487, 0x00000000);
- nv_icmd(dev, 0x00000488, 0x00000000);
- nv_icmd(dev, 0x00000489, 0x00000000);
- nv_icmd(dev, 0x0000048a, 0x00000000);
- nv_icmd(dev, 0x0000048b, 0x00000000);
- nv_icmd(dev, 0x0000048c, 0x00000000);
- nv_icmd(dev, 0x0000048d, 0x00000000);
- nv_icmd(dev, 0x0000048e, 0x00000000);
- nv_icmd(dev, 0x0000048f, 0x00000000);
- nv_icmd(dev, 0x00000490, 0x00000000);
- nv_icmd(dev, 0x00000491, 0x00000000);
- nv_icmd(dev, 0x00000492, 0x00000000);
- nv_icmd(dev, 0x00000493, 0x00000000);
- nv_icmd(dev, 0x00000494, 0x00000000);
- nv_icmd(dev, 0x00000495, 0x00000000);
- nv_icmd(dev, 0x00000496, 0x00000000);
- nv_icmd(dev, 0x00000497, 0x00000000);
- nv_icmd(dev, 0x00000498, 0x00000000);
- nv_icmd(dev, 0x00000499, 0x00000000);
- nv_icmd(dev, 0x0000049a, 0x00000000);
- nv_icmd(dev, 0x0000049b, 0x00000000);
- nv_icmd(dev, 0x0000049c, 0x00000000);
- nv_icmd(dev, 0x0000049d, 0x00000000);
- nv_icmd(dev, 0x0000049e, 0x00000000);
- nv_icmd(dev, 0x0000049f, 0x00000000);
- nv_icmd(dev, 0x000004a0, 0x00000000);
- nv_icmd(dev, 0x000004a1, 0x00000000);
- nv_icmd(dev, 0x000004a2, 0x00000000);
- nv_icmd(dev, 0x000004a3, 0x00000000);
- nv_icmd(dev, 0x000004a4, 0x00000000);
- nv_icmd(dev, 0x000004a5, 0x00000000);
- nv_icmd(dev, 0x000004a6, 0x00000000);
- nv_icmd(dev, 0x000004a7, 0x00000000);
- nv_icmd(dev, 0x000004a8, 0x00000000);
- nv_icmd(dev, 0x000004a9, 0x00000000);
- nv_icmd(dev, 0x000004aa, 0x00000000);
- nv_icmd(dev, 0x000004ab, 0x00000000);
- nv_icmd(dev, 0x000004ac, 0x00000000);
- nv_icmd(dev, 0x000004ad, 0x00000000);
- nv_icmd(dev, 0x000004ae, 0x00000000);
- nv_icmd(dev, 0x000004af, 0x00000000);
- nv_icmd(dev, 0x000004b0, 0x00000000);
- nv_icmd(dev, 0x000004b1, 0x00000000);
- nv_icmd(dev, 0x000004b2, 0x00000000);
- nv_icmd(dev, 0x000004b3, 0x00000000);
- nv_icmd(dev, 0x000004b4, 0x00000000);
- nv_icmd(dev, 0x000004b5, 0x00000000);
- nv_icmd(dev, 0x000004b6, 0x00000000);
- nv_icmd(dev, 0x000004b7, 0x00000000);
- nv_icmd(dev, 0x000004b8, 0x00000000);
- nv_icmd(dev, 0x000004b9, 0x00000000);
- nv_icmd(dev, 0x000004ba, 0x00000000);
- nv_icmd(dev, 0x000004bb, 0x00000000);
- nv_icmd(dev, 0x000004bc, 0x00000000);
- nv_icmd(dev, 0x000004bd, 0x00000000);
- nv_icmd(dev, 0x000004be, 0x00000000);
- nv_icmd(dev, 0x000004bf, 0x00000000);
- nv_icmd(dev, 0x000004c0, 0x00000000);
- nv_icmd(dev, 0x000004c1, 0x00000000);
- nv_icmd(dev, 0x000004c2, 0x00000000);
- nv_icmd(dev, 0x000004c3, 0x00000000);
- nv_icmd(dev, 0x000004c4, 0x00000000);
- nv_icmd(dev, 0x000004c5, 0x00000000);
- nv_icmd(dev, 0x000004c6, 0x00000000);
- nv_icmd(dev, 0x000004c7, 0x00000000);
- nv_icmd(dev, 0x000004c8, 0x00000000);
- nv_icmd(dev, 0x000004c9, 0x00000000);
- nv_icmd(dev, 0x000004ca, 0x00000000);
- nv_icmd(dev, 0x000004cb, 0x00000000);
- nv_icmd(dev, 0x000004cc, 0x00000000);
- nv_icmd(dev, 0x000004cd, 0x00000000);
- nv_icmd(dev, 0x000004ce, 0x00000000);
- nv_icmd(dev, 0x000004cf, 0x00000000);
- nv_icmd(dev, 0x00000510, 0x3f800000);
- nv_icmd(dev, 0x00000511, 0x3f800000);
- nv_icmd(dev, 0x00000512, 0x3f800000);
- nv_icmd(dev, 0x00000513, 0x3f800000);
- nv_icmd(dev, 0x00000514, 0x3f800000);
- nv_icmd(dev, 0x00000515, 0x3f800000);
- nv_icmd(dev, 0x00000516, 0x3f800000);
- nv_icmd(dev, 0x00000517, 0x3f800000);
- nv_icmd(dev, 0x00000518, 0x3f800000);
- nv_icmd(dev, 0x00000519, 0x3f800000);
- nv_icmd(dev, 0x0000051a, 0x3f800000);
- nv_icmd(dev, 0x0000051b, 0x3f800000);
- nv_icmd(dev, 0x0000051c, 0x3f800000);
- nv_icmd(dev, 0x0000051d, 0x3f800000);
- nv_icmd(dev, 0x0000051e, 0x3f800000);
- nv_icmd(dev, 0x0000051f, 0x3f800000);
- nv_icmd(dev, 0x00000520, 0x000002b6);
- nv_icmd(dev, 0x00000529, 0x00000001);
- nv_icmd(dev, 0x00000530, 0xffff0000);
- nv_icmd(dev, 0x00000531, 0xffff0000);
- nv_icmd(dev, 0x00000532, 0xffff0000);
- nv_icmd(dev, 0x00000533, 0xffff0000);
- nv_icmd(dev, 0x00000534, 0xffff0000);
- nv_icmd(dev, 0x00000535, 0xffff0000);
- nv_icmd(dev, 0x00000536, 0xffff0000);
- nv_icmd(dev, 0x00000537, 0xffff0000);
- nv_icmd(dev, 0x00000538, 0xffff0000);
- nv_icmd(dev, 0x00000539, 0xffff0000);
- nv_icmd(dev, 0x0000053a, 0xffff0000);
- nv_icmd(dev, 0x0000053b, 0xffff0000);
- nv_icmd(dev, 0x0000053c, 0xffff0000);
- nv_icmd(dev, 0x0000053d, 0xffff0000);
- nv_icmd(dev, 0x0000053e, 0xffff0000);
- nv_icmd(dev, 0x0000053f, 0xffff0000);
- nv_icmd(dev, 0x00000585, 0x0000003f);
- nv_icmd(dev, 0x00000576, 0x00000003);
- if (dev_priv->chipset == 0xc1 ||
- dev_priv->chipset == 0xd9)
- nv_icmd(dev, 0x0000057b, 0x00000059);
- nv_icmd(dev, 0x00000586, 0x00000040);
- nv_icmd(dev, 0x00000582, 0x00000080);
- nv_icmd(dev, 0x00000583, 0x00000080);
- nv_icmd(dev, 0x000005c2, 0x00000001);
- nv_icmd(dev, 0x00000638, 0x00000001);
- nv_icmd(dev, 0x00000639, 0x00000001);
- nv_icmd(dev, 0x0000063a, 0x00000002);
- nv_icmd(dev, 0x0000063b, 0x00000001);
- nv_icmd(dev, 0x0000063c, 0x00000001);
- nv_icmd(dev, 0x0000063d, 0x00000002);
- nv_icmd(dev, 0x0000063e, 0x00000001);
- nv_icmd(dev, 0x000008b8, 0x00000001);
- nv_icmd(dev, 0x000008b9, 0x00000001);
- nv_icmd(dev, 0x000008ba, 0x00000001);
- nv_icmd(dev, 0x000008bb, 0x00000001);
- nv_icmd(dev, 0x000008bc, 0x00000001);
- nv_icmd(dev, 0x000008bd, 0x00000001);
- nv_icmd(dev, 0x000008be, 0x00000001);
- nv_icmd(dev, 0x000008bf, 0x00000001);
- nv_icmd(dev, 0x00000900, 0x00000001);
- nv_icmd(dev, 0x00000901, 0x00000001);
- nv_icmd(dev, 0x00000902, 0x00000001);
- nv_icmd(dev, 0x00000903, 0x00000001);
- nv_icmd(dev, 0x00000904, 0x00000001);
- nv_icmd(dev, 0x00000905, 0x00000001);
- nv_icmd(dev, 0x00000906, 0x00000001);
- nv_icmd(dev, 0x00000907, 0x00000001);
- nv_icmd(dev, 0x00000908, 0x00000002);
- nv_icmd(dev, 0x00000909, 0x00000002);
- nv_icmd(dev, 0x0000090a, 0x00000002);
- nv_icmd(dev, 0x0000090b, 0x00000002);
- nv_icmd(dev, 0x0000090c, 0x00000002);
- nv_icmd(dev, 0x0000090d, 0x00000002);
- nv_icmd(dev, 0x0000090e, 0x00000002);
- nv_icmd(dev, 0x0000090f, 0x00000002);
- nv_icmd(dev, 0x00000910, 0x00000001);
- nv_icmd(dev, 0x00000911, 0x00000001);
- nv_icmd(dev, 0x00000912, 0x00000001);
- nv_icmd(dev, 0x00000913, 0x00000001);
- nv_icmd(dev, 0x00000914, 0x00000001);
- nv_icmd(dev, 0x00000915, 0x00000001);
- nv_icmd(dev, 0x00000916, 0x00000001);
- nv_icmd(dev, 0x00000917, 0x00000001);
- nv_icmd(dev, 0x00000918, 0x00000001);
- nv_icmd(dev, 0x00000919, 0x00000001);
- nv_icmd(dev, 0x0000091a, 0x00000001);
- nv_icmd(dev, 0x0000091b, 0x00000001);
- nv_icmd(dev, 0x0000091c, 0x00000001);
- nv_icmd(dev, 0x0000091d, 0x00000001);
- nv_icmd(dev, 0x0000091e, 0x00000001);
- nv_icmd(dev, 0x0000091f, 0x00000001);
- nv_icmd(dev, 0x00000920, 0x00000002);
- nv_icmd(dev, 0x00000921, 0x00000002);
- nv_icmd(dev, 0x00000922, 0x00000002);
- nv_icmd(dev, 0x00000923, 0x00000002);
- nv_icmd(dev, 0x00000924, 0x00000002);
- nv_icmd(dev, 0x00000925, 0x00000002);
- nv_icmd(dev, 0x00000926, 0x00000002);
- nv_icmd(dev, 0x00000927, 0x00000002);
- nv_icmd(dev, 0x00000928, 0x00000001);
- nv_icmd(dev, 0x00000929, 0x00000001);
- nv_icmd(dev, 0x0000092a, 0x00000001);
- nv_icmd(dev, 0x0000092b, 0x00000001);
- nv_icmd(dev, 0x0000092c, 0x00000001);
- nv_icmd(dev, 0x0000092d, 0x00000001);
- nv_icmd(dev, 0x0000092e, 0x00000001);
- nv_icmd(dev, 0x0000092f, 0x00000001);
- nv_icmd(dev, 0x00000648, 0x00000001);
- nv_icmd(dev, 0x00000649, 0x00000001);
- nv_icmd(dev, 0x0000064a, 0x00000001);
- nv_icmd(dev, 0x0000064b, 0x00000001);
- nv_icmd(dev, 0x0000064c, 0x00000001);
- nv_icmd(dev, 0x0000064d, 0x00000001);
- nv_icmd(dev, 0x0000064e, 0x00000001);
- nv_icmd(dev, 0x0000064f, 0x00000001);
- nv_icmd(dev, 0x00000650, 0x00000001);
- nv_icmd(dev, 0x00000658, 0x0000000f);
- nv_icmd(dev, 0x000007ff, 0x0000000a);
- nv_icmd(dev, 0x0000066a, 0x40000000);
- nv_icmd(dev, 0x0000066b, 0x10000000);
- nv_icmd(dev, 0x0000066c, 0xffff0000);
- nv_icmd(dev, 0x0000066d, 0xffff0000);
- nv_icmd(dev, 0x000007af, 0x00000008);
- nv_icmd(dev, 0x000007b0, 0x00000008);
- nv_icmd(dev, 0x000007f6, 0x00000001);
- nv_icmd(dev, 0x000006b2, 0x00000055);
- nv_icmd(dev, 0x000007ad, 0x00000003);
- nv_icmd(dev, 0x00000937, 0x00000001);
- nv_icmd(dev, 0x00000971, 0x00000008);
- nv_icmd(dev, 0x00000972, 0x00000040);
- nv_icmd(dev, 0x00000973, 0x0000012c);
- nv_icmd(dev, 0x0000097c, 0x00000040);
- nv_icmd(dev, 0x00000979, 0x00000003);
- nv_icmd(dev, 0x00000975, 0x00000020);
- nv_icmd(dev, 0x00000976, 0x00000001);
- nv_icmd(dev, 0x00000977, 0x00000020);
- nv_icmd(dev, 0x00000978, 0x00000001);
- nv_icmd(dev, 0x00000957, 0x00000003);
- nv_icmd(dev, 0x0000095e, 0x20164010);
- nv_icmd(dev, 0x0000095f, 0x00000020);
- if (dev_priv->chipset == 0xd9)
- nv_icmd(dev, 0x0000097d, 0x00000020);
- nv_icmd(dev, 0x00000683, 0x00000006);
- nv_icmd(dev, 0x00000685, 0x003fffff);
- nv_icmd(dev, 0x00000687, 0x00000c48);
- nv_icmd(dev, 0x000006a0, 0x00000005);
- nv_icmd(dev, 0x00000840, 0x00300008);
- nv_icmd(dev, 0x00000841, 0x04000080);
- nv_icmd(dev, 0x00000842, 0x00300008);
- nv_icmd(dev, 0x00000843, 0x04000080);
- nv_icmd(dev, 0x00000818, 0x00000000);
- nv_icmd(dev, 0x00000819, 0x00000000);
- nv_icmd(dev, 0x0000081a, 0x00000000);
- nv_icmd(dev, 0x0000081b, 0x00000000);
- nv_icmd(dev, 0x0000081c, 0x00000000);
- nv_icmd(dev, 0x0000081d, 0x00000000);
- nv_icmd(dev, 0x0000081e, 0x00000000);
- nv_icmd(dev, 0x0000081f, 0x00000000);
- nv_icmd(dev, 0x00000848, 0x00000000);
- nv_icmd(dev, 0x00000849, 0x00000000);
- nv_icmd(dev, 0x0000084a, 0x00000000);
- nv_icmd(dev, 0x0000084b, 0x00000000);
- nv_icmd(dev, 0x0000084c, 0x00000000);
- nv_icmd(dev, 0x0000084d, 0x00000000);
- nv_icmd(dev, 0x0000084e, 0x00000000);
- nv_icmd(dev, 0x0000084f, 0x00000000);
- nv_icmd(dev, 0x00000850, 0x00000000);
- nv_icmd(dev, 0x00000851, 0x00000000);
- nv_icmd(dev, 0x00000852, 0x00000000);
- nv_icmd(dev, 0x00000853, 0x00000000);
- nv_icmd(dev, 0x00000854, 0x00000000);
- nv_icmd(dev, 0x00000855, 0x00000000);
- nv_icmd(dev, 0x00000856, 0x00000000);
- nv_icmd(dev, 0x00000857, 0x00000000);
- nv_icmd(dev, 0x00000738, 0x00000000);
- nv_icmd(dev, 0x000006aa, 0x00000001);
- nv_icmd(dev, 0x000006ab, 0x00000002);
- nv_icmd(dev, 0x000006ac, 0x00000080);
- nv_icmd(dev, 0x000006ad, 0x00000100);
- nv_icmd(dev, 0x000006ae, 0x00000100);
- nv_icmd(dev, 0x000006b1, 0x00000011);
- nv_icmd(dev, 0x000006bb, 0x000000cf);
- nv_icmd(dev, 0x000006ce, 0x2a712488);
- nv_icmd(dev, 0x00000739, 0x4085c000);
- nv_icmd(dev, 0x0000073a, 0x00000080);
- nv_icmd(dev, 0x00000786, 0x80000100);
- nv_icmd(dev, 0x0000073c, 0x00010100);
- nv_icmd(dev, 0x0000073d, 0x02800000);
- nv_icmd(dev, 0x00000787, 0x000000cf);
- nv_icmd(dev, 0x0000078c, 0x00000008);
- nv_icmd(dev, 0x00000792, 0x00000001);
- nv_icmd(dev, 0x00000794, 0x00000001);
- nv_icmd(dev, 0x00000795, 0x00000001);
- nv_icmd(dev, 0x00000796, 0x00000001);
- nv_icmd(dev, 0x00000797, 0x000000cf);
- nv_icmd(dev, 0x00000836, 0x00000001);
- nv_icmd(dev, 0x0000079a, 0x00000002);
- nv_icmd(dev, 0x00000833, 0x04444480);
- nv_icmd(dev, 0x000007a1, 0x00000001);
- nv_icmd(dev, 0x000007a3, 0x00000001);
- nv_icmd(dev, 0x000007a4, 0x00000001);
- nv_icmd(dev, 0x000007a5, 0x00000001);
- nv_icmd(dev, 0x00000831, 0x00000004);
- nv_icmd(dev, 0x0000080c, 0x00000002);
- nv_icmd(dev, 0x0000080d, 0x00000100);
- nv_icmd(dev, 0x0000080e, 0x00000100);
- nv_icmd(dev, 0x0000080f, 0x00000001);
- nv_icmd(dev, 0x00000823, 0x00000002);
- nv_icmd(dev, 0x00000824, 0x00000100);
- nv_icmd(dev, 0x00000825, 0x00000100);
- nv_icmd(dev, 0x00000826, 0x00000001);
- nv_icmd(dev, 0x0000095d, 0x00000001);
- nv_icmd(dev, 0x0000082b, 0x00000004);
- nv_icmd(dev, 0x00000942, 0x00010001);
- nv_icmd(dev, 0x00000943, 0x00000001);
- nv_icmd(dev, 0x00000944, 0x00000022);
- nv_icmd(dev, 0x000007c5, 0x00010001);
- nv_icmd(dev, 0x00000834, 0x00000001);
- nv_icmd(dev, 0x000007c7, 0x00000001);
- nv_icmd(dev, 0x0000c1b0, 0x0000000f);
- nv_icmd(dev, 0x0000c1b1, 0x0000000f);
- nv_icmd(dev, 0x0000c1b2, 0x0000000f);
- nv_icmd(dev, 0x0000c1b3, 0x0000000f);
- nv_icmd(dev, 0x0000c1b4, 0x0000000f);
- nv_icmd(dev, 0x0000c1b5, 0x0000000f);
- nv_icmd(dev, 0x0000c1b6, 0x0000000f);
- nv_icmd(dev, 0x0000c1b7, 0x0000000f);
- nv_icmd(dev, 0x0000c1b8, 0x0fac6881);
- nv_icmd(dev, 0x0000c1b9, 0x00fac688);
- nv_icmd(dev, 0x0001e100, 0x00000001);
- nv_icmd(dev, 0x00001000, 0x00000002);
- nv_icmd(dev, 0x000006aa, 0x00000001);
- nv_icmd(dev, 0x000006ad, 0x00000100);
- nv_icmd(dev, 0x000006ae, 0x00000100);
- nv_icmd(dev, 0x000006b1, 0x00000011);
- nv_icmd(dev, 0x0000078c, 0x00000008);
- nv_icmd(dev, 0x00000792, 0x00000001);
- nv_icmd(dev, 0x00000794, 0x00000001);
- nv_icmd(dev, 0x00000795, 0x00000001);
- nv_icmd(dev, 0x00000796, 0x00000001);
- nv_icmd(dev, 0x00000797, 0x000000cf);
- nv_icmd(dev, 0x0000079a, 0x00000002);
- nv_icmd(dev, 0x00000833, 0x04444480);
- nv_icmd(dev, 0x000007a1, 0x00000001);
- nv_icmd(dev, 0x000007a3, 0x00000001);
- nv_icmd(dev, 0x000007a4, 0x00000001);
- nv_icmd(dev, 0x000007a5, 0x00000001);
- nv_icmd(dev, 0x00000831, 0x00000004);
- nv_icmd(dev, 0x0001e100, 0x00000001);
- nv_icmd(dev, 0x00001000, 0x00000014);
- nv_icmd(dev, 0x00000351, 0x00000100);
- nv_icmd(dev, 0x00000957, 0x00000003);
- nv_icmd(dev, 0x0000095d, 0x00000001);
- nv_icmd(dev, 0x0000082b, 0x00000004);
- nv_icmd(dev, 0x00000942, 0x00010001);
- nv_icmd(dev, 0x00000943, 0x00000001);
- nv_icmd(dev, 0x000007c5, 0x00010001);
- nv_icmd(dev, 0x00000834, 0x00000001);
- nv_icmd(dev, 0x000007c7, 0x00000001);
- nv_icmd(dev, 0x0001e100, 0x00000001);
- nv_icmd(dev, 0x00001000, 0x00000001);
- nv_icmd(dev, 0x0000080c, 0x00000002);
- nv_icmd(dev, 0x0000080d, 0x00000100);
- nv_icmd(dev, 0x0000080e, 0x00000100);
- nv_icmd(dev, 0x0000080f, 0x00000001);
- nv_icmd(dev, 0x00000823, 0x00000002);
- nv_icmd(dev, 0x00000824, 0x00000100);
- nv_icmd(dev, 0x00000825, 0x00000100);
- nv_icmd(dev, 0x00000826, 0x00000001);
- nv_icmd(dev, 0x0001e100, 0x00000001);
- nv_wr32(dev, 0x400208, 0x00000000);
- nv_wr32(dev, 0x404154, 0x00000400);
-
- nvc0_grctx_generate_9097(dev);
- if (fermi >= 0x9197)
- nvc0_grctx_generate_9197(dev);
- if (fermi >= 0x9297)
- nvc0_grctx_generate_9297(dev);
- nvc0_grctx_generate_902d(dev);
- nvc0_grctx_generate_9039(dev);
- nvc0_grctx_generate_90c0(dev);
-
- nv_wr32(dev, 0x000260, r000260);
- return 0;
-}
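The block of writes to 0x419bd0/0x419be4 near the end of nvc0_grctx_generate() above packs the total TPC count into a normalised count, a shift amount, and a set of modular residues. A minimal standalone sketch of that computation follows (illustrative only, not part of the patch; the function and variable names here are invented for clarity):

```c
/*
 * Illustrative sketch only -- not part of the patch.  Shows how the
 * "second set of magics" written to 0x419bd0/0x419be4 above is derived
 * from the total TPC count (assumes 1 <= tp_total <= 31).
 */
#include <stdint.h>
#include <stdio.h>

static void
tpc_magic(uint8_t tp_total, uint32_t *reg0, uint32_t *reg1)
{
	uint8_t ntpcv = tp_total, shift = 0;
	int i;

	/* normalise: shift the count left until bit 4 is set */
	while (!(ntpcv & (1 << 4))) {
		ntpcv <<= 1;
		shift++;
	}

	*reg0  = (uint32_t)ntpcv << 16;      /* normalised TPC count   */
	*reg0 |= (uint32_t)shift << 21;      /* how far it was shifted */
	*reg0 |= ((1 << 5) % ntpcv) << 24;   /* residue of 2^5         */

	*reg1 = 0;
	for (i = 1; i < 7; i++)              /* residues of 2^6..2^11  */
		*reg1 |= (uint32_t)((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
}

int
main(void)
{
	uint32_t r0, r1;

	tpc_magic(14, &r0, &r1);             /* e.g. a 14-TPC GF100    */
	printf("0x%08x 0x%08x\n", r0, r1);
	return 0;
}
```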
+++ /dev/null
-/* fuc microcode for nvc0 PGRAPH/GPC
- *
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-/* To build:
- * m4 nvc0_grgpc.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grgpc.fuc.h
- */
-
-/* TODO
- * - bracket certain functions with scratch writes, useful for debugging
- * - watchdog timer around ctx operations
- */
-
-.section #nvc0_grgpc_data
-include(`nvc0_graph.fuc')
-gpc_id: .b32 0
-gpc_mmio_list_head: .b32 0
-gpc_mmio_list_tail: .b32 0
-
-tpc_count: .b32 0
-tpc_mask: .b32 0
-tpc_mmio_list_head: .b32 0
-tpc_mmio_list_tail: .b32 0
-
-cmd_queue: queue_init
-
-// chipset descriptions
-chipsets:
-.b8 0xc0 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc0_tpc_mmio_tail
-.b8 0xc1 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc1_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc1_tpc_mmio_tail
-.b8 0xc3 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc3_tpc_mmio_tail
-.b8 0xc4 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc3_tpc_mmio_tail
-.b8 0xc8 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc0_tpc_mmio_tail
-.b8 0xce 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvc3_tpc_mmio_tail
-.b8 0xcf 0 0 0
-.b16 #nvc0_gpc_mmio_head
-.b16 #nvc0_gpc_mmio_tail
-.b16 #nvc0_tpc_mmio_head
-.b16 #nvcf_tpc_mmio_tail
-.b8 0xd9 0 0 0
-.b16 #nvd9_gpc_mmio_head
-.b16 #nvd9_gpc_mmio_tail
-.b16 #nvd9_tpc_mmio_head
-.b16 #nvd9_tpc_mmio_tail
-.b8 0 0 0 0
-
-// GPC mmio lists
-nvc0_gpc_mmio_head:
-mmctx_data(0x000380, 1)
-mmctx_data(0x000400, 6)
-mmctx_data(0x000450, 9)
-mmctx_data(0x000600, 1)
-mmctx_data(0x000684, 1)
-mmctx_data(0x000700, 5)
-mmctx_data(0x000800, 1)
-mmctx_data(0x000808, 3)
-mmctx_data(0x000828, 1)
-mmctx_data(0x000830, 1)
-mmctx_data(0x0008d8, 1)
-mmctx_data(0x0008e0, 1)
-mmctx_data(0x0008e8, 6)
-mmctx_data(0x00091c, 1)
-mmctx_data(0x000924, 3)
-mmctx_data(0x000b00, 1)
-mmctx_data(0x000b08, 6)
-mmctx_data(0x000bb8, 1)
-mmctx_data(0x000c08, 1)
-mmctx_data(0x000c10, 8)
-mmctx_data(0x000c80, 1)
-mmctx_data(0x000c8c, 1)
-mmctx_data(0x001000, 3)
-mmctx_data(0x001014, 1)
-nvc0_gpc_mmio_tail:
-mmctx_data(0x000c6c, 1);
-nvc1_gpc_mmio_tail:
-
-nvd9_gpc_mmio_head:
-mmctx_data(0x000380, 1)
-mmctx_data(0x000400, 2)
-mmctx_data(0x00040c, 3)
-mmctx_data(0x000450, 9)
-mmctx_data(0x000600, 1)
-mmctx_data(0x000684, 1)
-mmctx_data(0x000700, 5)
-mmctx_data(0x000800, 1)
-mmctx_data(0x000808, 3)
-mmctx_data(0x000828, 1)
-mmctx_data(0x000830, 1)
-mmctx_data(0x0008d8, 1)
-mmctx_data(0x0008e0, 1)
-mmctx_data(0x0008e8, 6)
-mmctx_data(0x00091c, 1)
-mmctx_data(0x000924, 3)
-mmctx_data(0x000b00, 1)
-mmctx_data(0x000b08, 6)
-mmctx_data(0x000bb8, 1)
-mmctx_data(0x000c08, 1)
-mmctx_data(0x000c10, 8)
-mmctx_data(0x000c6c, 1)
-mmctx_data(0x000c80, 1)
-mmctx_data(0x000c8c, 1)
-mmctx_data(0x001000, 3)
-mmctx_data(0x001014, 1)
-nvd9_gpc_mmio_tail:
-
-// TPC mmio lists
-nvc0_tpc_mmio_head:
-mmctx_data(0x000018, 1)
-mmctx_data(0x00003c, 1)
-mmctx_data(0x000048, 1)
-mmctx_data(0x000064, 1)
-mmctx_data(0x000088, 1)
-mmctx_data(0x000200, 6)
-mmctx_data(0x00021c, 2)
-mmctx_data(0x000300, 6)
-mmctx_data(0x0003d0, 1)
-mmctx_data(0x0003e0, 2)
-mmctx_data(0x000400, 3)
-mmctx_data(0x000420, 1)
-mmctx_data(0x0004b0, 1)
-mmctx_data(0x0004e8, 1)
-mmctx_data(0x0004f4, 1)
-mmctx_data(0x000520, 2)
-mmctx_data(0x000604, 4)
-mmctx_data(0x000644, 20)
-mmctx_data(0x000698, 1)
-mmctx_data(0x000750, 2)
-nvc0_tpc_mmio_tail:
-mmctx_data(0x000758, 1)
-mmctx_data(0x0002c4, 1)
-mmctx_data(0x0006e0, 1)
-nvcf_tpc_mmio_tail:
-mmctx_data(0x0004bc, 1)
-nvc3_tpc_mmio_tail:
-mmctx_data(0x000544, 1)
-nvc1_tpc_mmio_tail:
-
-nvd9_tpc_mmio_head:
-mmctx_data(0x000018, 1)
-mmctx_data(0x00003c, 1)
-mmctx_data(0x000048, 1)
-mmctx_data(0x000064, 1)
-mmctx_data(0x000088, 1)
-mmctx_data(0x000200, 6)
-mmctx_data(0x00021c, 2)
-mmctx_data(0x0002c4, 1)
-mmctx_data(0x000300, 6)
-mmctx_data(0x0003d0, 1)
-mmctx_data(0x0003e0, 2)
-mmctx_data(0x000400, 3)
-mmctx_data(0x000420, 3)
-mmctx_data(0x0004b0, 1)
-mmctx_data(0x0004e8, 1)
-mmctx_data(0x0004f4, 1)
-mmctx_data(0x000520, 2)
-mmctx_data(0x000544, 1)
-mmctx_data(0x000604, 4)
-mmctx_data(0x000644, 20)
-mmctx_data(0x000698, 1)
-mmctx_data(0x0006e0, 1)
-mmctx_data(0x000750, 3)
-nvd9_tpc_mmio_tail:
-
-.section #nvc0_grgpc_code
-bra #init
-define(`include_code')
-include(`nvc0_graph.fuc')
-
-// reports an exception to the host
-//
-// In: $r15 error code (see nvc0_graph.fuc)
-//
-error:
- push $r14
- mov $r14 -0x67ec // 0x9814
- sethi $r14 0x400000
- call #nv_wr32 // HUB_CTXCTL_CC_SCRATCH[5] = error code
- add b32 $r14 0x41c
- mov $r15 1
- call #nv_wr32 // HUB_CTXCTL_INTR_UP_SET
- pop $r14
- ret
-
-// GPC fuc initialisation, executed by triggering ucode start, will
-// fall through to main loop after completion.
-//
-// Input:
-// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
-// CC_SCRATCH[1]: context base
-//
-// Output:
-// CC_SCRATCH[0]:
-// 31:31: set to signal completion
-// CC_SCRATCH[1]:
-// 31:0: GPC context size
-//
-init:
- clear b32 $r0
- mov $sp $r0
-
- // enable fifo access
- mov $r1 0x1200
- mov $r2 2
- iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
-
- // setup i0 handler, and route all interrupts to it
- mov $r1 #ih
- mov $iv0 $r1
- mov $r1 0x400
- iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
-
- // enable fifo interrupt
- mov $r2 4
- iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
-
- // enable interrupts
- bset $flags ie0
-
- // figure out which GPC we are, and how many TPCs we have
- mov $r1 0x608
- shl b32 $r1 6
- iord $r2 I[$r1 + 0x000] // UNITS
- mov $r3 1
- and $r2 0x1f
- shl b32 $r3 $r2
- sub b32 $r3 1
- st b32 D[$r0 + #tpc_count] $r2
- st b32 D[$r0 + #tpc_mask] $r3
- add b32 $r1 0x400
- iord $r2 I[$r1 + 0x000] // MYINDEX
- st b32 D[$r0 + #gpc_id] $r2
-
- // find context data for this chipset
- mov $r2 0x800
- shl b32 $r2 6
- iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r1 #chipsets - 12
- init_find_chipset:
- add b32 $r1 12
- ld b32 $r3 D[$r1 + 0x00]
- cmpu b32 $r3 $r2
- bra e #init_context
- cmpu b32 $r3 0
- bra ne #init_find_chipset
- // unknown chipset
- ret
-
- // initialise context base, and size tracking
- init_context:
- mov $r2 0x800
- shl b32 $r2 6
- iord $r2 I[$r2 + 0x100] // CC_SCRATCH[1], initial base
- clear b32 $r3 // track GPC context size here
-
- // set mmctx base addresses now so we don't have to do it later,
- // they don't currently ever change
- mov $r4 0x700
- shl b32 $r4 6
- shr b32 $r5 $r2 8
- iowr I[$r4 + 0x000] $r5 // MMCTX_SAVE_SWBASE
- iowr I[$r4 + 0x100] $r5 // MMCTX_LOAD_SWBASE
-
- // calculate GPC mmio context size, store the chipset-specific
- // mmio list pointers somewhere we can get at them later without
- // re-parsing the chipset list
- clear b32 $r14
- clear b32 $r15
- ld b16 $r14 D[$r1 + 4]
- ld b16 $r15 D[$r1 + 6]
- st b16 D[$r0 + #gpc_mmio_list_head] $r14
- st b16 D[$r0 + #gpc_mmio_list_tail] $r15
- call #mmctx_size
- add b32 $r2 $r15
- add b32 $r3 $r15
-
- // calculate per-TPC mmio context size, store the list pointers
- ld b16 $r14 D[$r1 + 8]
- ld b16 $r15 D[$r1 + 10]
- st b16 D[$r0 + #tpc_mmio_list_head] $r14
- st b16 D[$r0 + #tpc_mmio_list_tail] $r15
- call #mmctx_size
- ld b32 $r14 D[$r0 + #tpc_count]
- mulu $r14 $r15
- add b32 $r2 $r14
- add b32 $r3 $r14
-
- // round up base/size to 256 byte boundary (for strand SWBASE)
- add b32 $r4 0x1300
- shr b32 $r3 2
- iowr I[$r4 + 0x000] $r3 // MMCTX_LOAD_COUNT, wtf for?!?
- shr b32 $r2 8
- shr b32 $r3 6
- add b32 $r2 1
- add b32 $r3 1
- shl b32 $r2 8
- shl b32 $r3 8
-
- // calculate size of strand context data
- mov b32 $r15 $r2
- call #strand_ctx_init
- add b32 $r3 $r15
-
- // save context size, and tell HUB we're done
- mov $r1 0x800
- shl b32 $r1 6
- iowr I[$r1 + 0x100] $r3 // CC_SCRATCH[1] = context size
- add b32 $r1 0x800
- clear b32 $r2
- bset $r2 31
- iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
-
-// Main program loop, very simple, sleeps until woken up by the interrupt
-// handler, pulls a command from the queue and executes its handler
-//
-main:
- bset $flags $p0
- sleep $p0
- mov $r13 #cmd_queue
- call #queue_get
- bra $p1 #main
-
- // 0x0000-0x0003 are all context transfers
- cmpu b32 $r14 0x04
- bra nc #main_not_ctx_xfer
- // fetch $flags and mask off $p1/$p2
- mov $r1 $flags
- mov $r2 0x0006
- not b32 $r2
- and $r1 $r2
- // set $p1/$p2 according to transfer type
- shl b32 $r14 1
- or $r1 $r14
- mov $flags $r1
- // transfer context data
- call #ctx_xfer
- bra #main
-
- main_not_ctx_xfer:
- shl b32 $r15 $r14 16
- or $r15 E_BAD_COMMAND
- call #error
- bra #main
-
-// interrupt handler
-ih:
- push $r8
- mov $r8 $flags
- push $r8
- push $r9
- push $r10
- push $r11
- push $r13
- push $r14
- push $r15
-
- // incoming fifo command?
- iord $r10 I[$r0 + 0x200] // INTR
- and $r11 $r10 0x00000004
- bra e #ih_no_fifo
- // queue incoming fifo command for later processing
- mov $r11 0x1900
- mov $r13 #cmd_queue
- iord $r14 I[$r11 + 0x100] // FIFO_CMD
- iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call #queue_put
- add b32 $r11 0x400
- mov $r14 1
- iowr I[$r11 + 0x000] $r14 // FIFO_ACK
-
- // ack, and wake up main()
- ih_no_fifo:
- iowr I[$r0 + 0x100] $r10 // INTR_ACK
-
- pop $r15
- pop $r14
- pop $r13
- pop $r11
- pop $r10
- pop $r9
- pop $r8
- mov $flags $r8
- pop $r8
- bclr $flags $p0
- iret
-
-// Set this GPC's bit in HUB_BAR, used to signal completion of various
-// activities to the HUB fuc
-//
-hub_barrier_done:
- mov $r15 1
- ld b32 $r14 D[$r0 + #gpc_id]
- shl b32 $r15 $r14
- mov $r14 -0x6be8 // 0x409418 - HUB_BAR_SET
- sethi $r14 0x400000
- call #nv_wr32
- ret
-
-// Disables various things, waits a bit, and re-enables them..
-//
-// Not sure how exactly this helps, perhaps "ENABLE" is not such a
-// good description for the bits we turn off? Anyways, without this,
-// funny things happen.
-//
-ctx_redswitch:
- mov $r14 0x614
- shl b32 $r14 6
- mov $r15 0x020
- iowr I[$r14] $r15 // GPC_RED_SWITCH = POWER
- mov $r15 8
- ctx_redswitch_delay:
- sub b32 $r15 1
- bra ne #ctx_redswitch_delay
- mov $r15 0xa20
- iowr I[$r14] $r15 // GPC_RED_SWITCH = UNK11, ENABLE, POWER
- ret
-
-// Transfer GPC context data between GPU and storage area
-//
-// In: $r15 context base address
-// $p1 clear on save, set on load
-// $p2 set if opposite direction done/will be done, so:
-// on save it means: "a load will follow this save"
-// on load it means: "a save preceded this load"
-//
-ctx_xfer:
- // set context base address
- mov $r1 0xa04
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r15// MEM_BASE
- bra not $p1 #ctx_xfer_not_load
- call #ctx_redswitch
- ctx_xfer_not_load:
-
- // strands
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xc
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call #strand_wait
- mov $r2 0x47fc
- sethi $r2 0x20000
- iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
- xbit $r2 $flags $p1
- add b32 $r2 3
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
-
- // mmio context
- xbit $r10 $flags $p1 // direction
- or $r10 2 // first
- mov $r11 0x0000
- sethi $r11 0x500000
- ld b32 $r12 D[$r0 + #gpc_id]
- shl b32 $r12 15
- add b32 $r11 $r12 // base = NV_PGRAPH_GPCn
- ld b32 $r12 D[$r0 + #gpc_mmio_list_head]
- ld b32 $r13 D[$r0 + #gpc_mmio_list_tail]
- mov $r14 0 // not multi
- call #mmctx_xfer
-
- // per-TPC mmio context
- xbit $r10 $flags $p1 // direction
- or $r10 4 // last
- mov $r11 0x4000
- sethi $r11 0x500000 // base = NV_PGRAPH_GPC0_TPC0
- ld b32 $r12 D[$r0 + #gpc_id]
- shl b32 $r12 15
- add b32 $r11 $r12 // base = NV_PGRAPH_GPCn_TPC0
- ld b32 $r12 D[$r0 + #tpc_mmio_list_head]
- ld b32 $r13 D[$r0 + #tpc_mmio_list_tail]
- ld b32 $r15 D[$r0 + #tpc_mask]
- mov $r14 0x800 // stride = 0x800
- call #mmctx_xfer
-
- // wait for strands to finish
- call #strand_wait
-
- // if load, or a save without a load following, do some
- // unknown stuff that's done after finishing a block of
- // strand commands
- bra $p1 #ctx_xfer_post
- bra not $p2 #ctx_xfer_done
- ctx_xfer_post:
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xd
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0d
- call #strand_wait
-
- // mark completion in HUB's barrier
- ctx_xfer_done:
- call #hub_barrier_done
- ret
-
-.align 256
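Note: the chipset lookup performed by init_find_chipset above walks a table of 12-byte records (a 32-bit chipset id followed by four 16-bit list pointers: the GPC mmio head/tail and the per-TPC mmio head/tail), terminated by a zero id. The following is a rough C analogue of that loop, purely for illustration; the struct and function names are not part of the driver.

#include <stdint.h>
#include <stddef.h>

/* Illustrative layout of one 12-byte chipset record, matching the
 * D[$r1 + 0/4/6/8/10] loads done by the GPC fuc's init_context. */
struct gpc_chipset_rec {
	uint32_t chipset;        /* 0 terminates the table */
	uint16_t gpc_mmio_head;  /* GPC mmio list bounds */
	uint16_t gpc_mmio_tail;
	uint16_t tpc_mmio_head;  /* per-TPC mmio list bounds */
	uint16_t tpc_mmio_tail;
};

/* Return the record for 'chipset', or NULL if it is unknown, the same
 * way init_find_chipset falls through to 'ret' on an unknown chipset. */
static const struct gpc_chipset_rec *
gpc_find_chipset(const struct gpc_chipset_rec *tbl, uint32_t chipset)
{
	for (; tbl->chipset; tbl++) {
		if (tbl->chipset == chipset)
			return tbl;
	}
	return NULL;
}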
+++ /dev/null
-uint32_t nvc0_grgpc_data[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x000000c0,
- 0x012800c8,
- 0x01e40194,
- 0x000000c1,
- 0x012c00c8,
- 0x01f80194,
- 0x000000c3,
- 0x012800c8,
- 0x01f40194,
- 0x000000c4,
- 0x012800c8,
- 0x01f40194,
- 0x000000c8,
- 0x012800c8,
- 0x01e40194,
- 0x000000ce,
- 0x012800c8,
- 0x01f40194,
- 0x000000cf,
- 0x012800c8,
- 0x01f00194,
- 0x000000d9,
- 0x0194012c,
- 0x025401f8,
- 0x00000000,
- 0x00000380,
- 0x14000400,
- 0x20000450,
- 0x00000600,
- 0x00000684,
- 0x10000700,
- 0x00000800,
- 0x08000808,
- 0x00000828,
- 0x00000830,
- 0x000008d8,
- 0x000008e0,
- 0x140008e8,
- 0x0000091c,
- 0x08000924,
- 0x00000b00,
- 0x14000b08,
- 0x00000bb8,
- 0x00000c08,
- 0x1c000c10,
- 0x00000c80,
- 0x00000c8c,
- 0x08001000,
- 0x00001014,
- 0x00000c6c,
- 0x00000380,
- 0x04000400,
- 0x0800040c,
- 0x20000450,
- 0x00000600,
- 0x00000684,
- 0x10000700,
- 0x00000800,
- 0x08000808,
- 0x00000828,
- 0x00000830,
- 0x000008d8,
- 0x000008e0,
- 0x140008e8,
- 0x0000091c,
- 0x08000924,
- 0x00000b00,
- 0x14000b08,
- 0x00000bb8,
- 0x00000c08,
- 0x1c000c10,
- 0x00000c6c,
- 0x00000c80,
- 0x00000c8c,
- 0x08001000,
- 0x00001014,
- 0x00000018,
- 0x0000003c,
- 0x00000048,
- 0x00000064,
- 0x00000088,
- 0x14000200,
- 0x0400021c,
- 0x14000300,
- 0x000003d0,
- 0x040003e0,
- 0x08000400,
- 0x00000420,
- 0x000004b0,
- 0x000004e8,
- 0x000004f4,
- 0x04000520,
- 0x0c000604,
- 0x4c000644,
- 0x00000698,
- 0x04000750,
- 0x00000758,
- 0x000002c4,
- 0x000006e0,
- 0x000004bc,
- 0x00000544,
- 0x00000018,
- 0x0000003c,
- 0x00000048,
- 0x00000064,
- 0x00000088,
- 0x14000200,
- 0x0400021c,
- 0x000002c4,
- 0x14000300,
- 0x000003d0,
- 0x040003e0,
- 0x08000400,
- 0x08000420,
- 0x000004b0,
- 0x000004e8,
- 0x000004f4,
- 0x04000520,
- 0x00000544,
- 0x0c000604,
- 0x4c000644,
- 0x00000698,
- 0x000006e0,
- 0x08000750,
-};
-
-uint32_t nvc0_grgpc_code[] = {
- 0x03060ef5,
- 0x9800d898,
- 0x86f001d9,
- 0x0489b808,
- 0xf00c1bf4,
- 0x21f502f7,
- 0x00f802ec,
- 0xb60798c4,
- 0x8dbb0384,
- 0x0880b600,
- 0x80008e80,
- 0x90b6018f,
- 0x0f94f001,
- 0xf801d980,
- 0x0131f400,
- 0x9800d898,
- 0x89b801d9,
- 0x210bf404,
- 0xb60789c4,
- 0x9dbb0394,
- 0x0890b600,
- 0x98009e98,
- 0x80b6019f,
- 0x0f84f001,
- 0xf400d880,
- 0x00f80132,
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010321f5,
- 0xf840bfcf,
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
- 0x3c87f100,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d000,
- 0x081887f1,
- 0xd00684b6,
- 0x87f1008a,
- 0x84b60400,
- 0x0088cf06,
- 0xf4888aff,
- 0x87f1f31b,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00099,
- 0xf100f800,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00099f0,
- 0x87f10089,
- 0x84b60818,
- 0x008ad006,
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0099f094,
- 0xf80089d0,
- 0x9894bd00,
- 0x85b600e8,
- 0x0180b61a,
- 0xbb0284b6,
- 0xe0b60098,
- 0x04efb804,
- 0xb9eb1bf4,
- 0x00f8029f,
- 0x083c87f1,
- 0xbd0684b6,
- 0x0199f094,
- 0xf10089d0,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
- 0xb70199f0,
- 0xc8010080,
- 0xb4b600ab,
- 0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
- 0x008bcf00,
- 0xf412bbc8,
- 0x87f1fa1b,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00199,
- 0xf900f800,
- 0x02a7f0a0,
- 0xfcc921f4,
- 0xf100f8a0,
- 0xf04afc87,
- 0x97f00283,
- 0x0089d00c,
- 0x020721f5,
- 0x87f100f8,
- 0x83f04afc,
- 0x0d97f002,
- 0xf50089d0,
- 0xf8020721,
- 0xfca7f100,
- 0x02a3f04f,
- 0x0500aba2,
- 0xd00fc7f0,
- 0xc7f000ac,
- 0x00bcd00b,
- 0x020721f5,
- 0xf000aed0,
- 0xbcd00ac7,
- 0x0721f500,
- 0xf100f802,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00399f0,
- 0x21f50089,
- 0xe7f00213,
- 0x3921f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x0721f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x0721f500,
- 0x2621f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
- 0x8ed008fe,
- 0x408ed000,
- 0xb6808acf,
- 0xa0b606a5,
- 0x00eabb01,
- 0xb60480b6,
- 0x1bf40192,
- 0x08e4b6e8,
- 0xf1f2efbc,
- 0xb6085c87,
- 0x94bd0684,
- 0xd00399f0,
- 0x00f80089,
- 0xe7f1e0f9,
- 0xe3f09814,
- 0x8d21f440,
- 0x041ce0b7,
- 0xf401f7f0,
- 0xe0fc8d21,
- 0x04bd00f8,
- 0xf10004fe,
- 0xf0120017,
- 0x12d00227,
- 0x3e17f100,
- 0x0010fe04,
- 0x040017f1,
- 0xf0c010d0,
- 0x12d00427,
- 0x1031f400,
- 0x060817f1,
- 0xcf0614b6,
- 0x37f00012,
- 0x1f24f001,
- 0xb60432bb,
- 0x02800132,
- 0x04038003,
- 0x040010b7,
- 0x800012cf,
- 0x27f10002,
- 0x24b60800,
- 0x0022cf06,
- 0xb65817f0,
- 0x13980c10,
- 0x0432b800,
- 0xb00b0bf4,
- 0x1bf40034,
- 0xf100f8f1,
- 0xb6080027,
- 0x22cf0624,
- 0xf134bd40,
- 0xb6070047,
- 0x25950644,
- 0x0045d008,
- 0xbd4045d0,
- 0x58f4bde4,
- 0x1f58021e,
- 0x020e4003,
- 0xf5040f40,
- 0xbb013d21,
- 0x3fbb002f,
- 0x041e5800,
- 0x40051f58,
- 0x0f400a0e,
- 0x3d21f50c,
- 0x030e9801,
- 0xbb00effd,
- 0x3ebb002e,
- 0x0040b700,
- 0x0235b613,
- 0xb60043d0,
- 0x35b60825,
- 0x0120b606,
- 0xb60130b6,
- 0x34b60824,
- 0x022fb908,
- 0x026321f5,
- 0xf1003fbb,
- 0xb6080017,
- 0x13d00614,
- 0x0010b740,
- 0xf024bd08,
- 0x12d01f29,
- 0x0031f400,
- 0xf00028f4,
- 0x21f41cd7,
- 0xf401f439,
- 0xf404e4b0,
- 0x81fe1e18,
- 0x0627f001,
- 0x12fd20bd,
- 0x01e4b604,
- 0xfe051efd,
- 0x21f50018,
- 0x0ef404c3,
- 0x10ef94d3,
- 0xf501f5f0,
- 0xf402ec21,
- 0x80f9c60e,
- 0xf90188fe,
- 0xf990f980,
- 0xf9b0f9a0,
- 0xf9e0f9d0,
- 0x800acff0,
- 0xf404abc4,
- 0xb7f11d0b,
- 0xd7f01900,
- 0x40becf1c,
- 0xf400bfcf,
- 0xb0b70421,
- 0xe7f00400,
- 0x00bed001,
- 0xfc400ad0,
- 0xfce0fcf0,
- 0xfcb0fcd0,
- 0xfc90fca0,
- 0x0088fe80,
- 0x32f480fc,
- 0xf001f800,
- 0x0e9801f7,
- 0x04febb00,
- 0x9418e7f1,
- 0xf440e3f0,
- 0x00f88d21,
- 0x0614e7f1,
- 0xf006e4b6,
- 0xefd020f7,
- 0x08f7f000,
- 0xf401f2b6,
- 0xf7f1fd1b,
- 0xefd00a20,
- 0xf100f800,
- 0xb60a0417,
- 0x1fd00614,
- 0x0711f400,
- 0x04a421f5,
- 0x4afc17f1,
- 0xf00213f0,
- 0x12d00c27,
- 0x0721f500,
- 0xfc27f102,
- 0x0223f047,
- 0xf00020d0,
- 0x20b6012c,
- 0x0012d003,
- 0xf001acf0,
- 0xb7f002a5,
- 0x50b3f000,
- 0xb6000c98,
- 0xbcbb0fc4,
- 0x010c9800,
- 0xf0020d98,
- 0x21f500e7,
- 0xacf0015c,
- 0x04a5f001,
- 0x4000b7f1,
- 0x9850b3f0,
- 0xc4b6000c,
- 0x00bcbb0f,
- 0x98050c98,
- 0x0f98060d,
- 0x00e7f104,
- 0x5c21f508,
- 0x0721f501,
- 0x0601f402,
- 0xf11412f4,
- 0xf04afc17,
- 0x27f00213,
- 0x0012d00d,
- 0x020721f5,
- 0x048f21f5,
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
+++ /dev/null
-/* fuc microcode for nvc0 PGRAPH/HUB
- *
- * Copyright 2011 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-/* To build:
- * m4 nvc0_grhub.fuc | envyas -a -w -m fuc -V nva3 -o nvc0_grhub.fuc.h
- */
-
-.section #nvc0_grhub_data
-include(`nvc0_graph.fuc')
-gpc_count: .b32 0
-rop_count: .b32 0
-cmd_queue: queue_init
-hub_mmio_list_head: .b32 0
-hub_mmio_list_tail: .b32 0
-
-ctx_current: .b32 0
-
-chipsets:
-.b8 0xc0 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xc1 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc1_hub_mmio_tail
-.b8 0xc3 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xc4 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xc8 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xce 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xcf 0 0 0
-.b16 #nvc0_hub_mmio_head
-.b16 #nvc0_hub_mmio_tail
-.b8 0xd9 0 0 0
-.b16 #nvd9_hub_mmio_head
-.b16 #nvd9_hub_mmio_tail
-.b8 0 0 0 0
-
-nvc0_hub_mmio_head:
-mmctx_data(0x17e91c, 2)
-mmctx_data(0x400204, 2)
-mmctx_data(0x404004, 11)
-mmctx_data(0x404044, 1)
-mmctx_data(0x404094, 14)
-mmctx_data(0x4040d0, 7)
-mmctx_data(0x4040f8, 1)
-mmctx_data(0x404130, 3)
-mmctx_data(0x404150, 3)
-mmctx_data(0x404164, 2)
-mmctx_data(0x404174, 3)
-mmctx_data(0x404200, 8)
-mmctx_data(0x404404, 14)
-mmctx_data(0x404460, 4)
-mmctx_data(0x404480, 1)
-mmctx_data(0x404498, 1)
-mmctx_data(0x404604, 4)
-mmctx_data(0x404618, 32)
-mmctx_data(0x404698, 21)
-mmctx_data(0x4046f0, 2)
-mmctx_data(0x404700, 22)
-mmctx_data(0x405800, 1)
-mmctx_data(0x405830, 3)
-mmctx_data(0x405854, 1)
-mmctx_data(0x405870, 4)
-mmctx_data(0x405a00, 2)
-mmctx_data(0x405a18, 1)
-mmctx_data(0x406020, 1)
-mmctx_data(0x406028, 4)
-mmctx_data(0x4064a8, 2)
-mmctx_data(0x4064b4, 2)
-mmctx_data(0x407804, 1)
-mmctx_data(0x40780c, 6)
-mmctx_data(0x4078bc, 1)
-mmctx_data(0x408000, 7)
-mmctx_data(0x408064, 1)
-mmctx_data(0x408800, 3)
-mmctx_data(0x408900, 4)
-mmctx_data(0x408980, 1)
-nvc0_hub_mmio_tail:
-mmctx_data(0x4064c0, 2)
-nvc1_hub_mmio_tail:
-
-nvd9_hub_mmio_head:
-mmctx_data(0x17e91c, 2)
-mmctx_data(0x400204, 2)
-mmctx_data(0x404004, 10)
-mmctx_data(0x404044, 1)
-mmctx_data(0x404094, 14)
-mmctx_data(0x4040d0, 7)
-mmctx_data(0x4040f8, 1)
-mmctx_data(0x404130, 3)
-mmctx_data(0x404150, 3)
-mmctx_data(0x404164, 2)
-mmctx_data(0x404178, 2)
-mmctx_data(0x404200, 8)
-mmctx_data(0x404404, 14)
-mmctx_data(0x404460, 4)
-mmctx_data(0x404480, 1)
-mmctx_data(0x404498, 1)
-mmctx_data(0x404604, 4)
-mmctx_data(0x404618, 32)
-mmctx_data(0x404698, 21)
-mmctx_data(0x4046f0, 2)
-mmctx_data(0x404700, 22)
-mmctx_data(0x405800, 1)
-mmctx_data(0x405830, 3)
-mmctx_data(0x405854, 1)
-mmctx_data(0x405870, 4)
-mmctx_data(0x405a00, 2)
-mmctx_data(0x405a18, 1)
-mmctx_data(0x406020, 1)
-mmctx_data(0x406028, 4)
-mmctx_data(0x4064a8, 2)
-mmctx_data(0x4064b4, 5)
-mmctx_data(0x407804, 1)
-mmctx_data(0x40780c, 6)
-mmctx_data(0x4078bc, 1)
-mmctx_data(0x408000, 7)
-mmctx_data(0x408064, 1)
-mmctx_data(0x408800, 3)
-mmctx_data(0x408900, 4)
-mmctx_data(0x408980, 1)
-nvd9_hub_mmio_tail:
-
-.align 256
-chan_data:
-chan_mmio_count: .b32 0
-chan_mmio_address: .b32 0
-
-.align 256
-xfer_data: .b32 0
-
-.section #nvc0_grhub_code
-bra #init
-define(`include_code')
-include(`nvc0_graph.fuc')
-
-// reports an exception to the host
-//
-// In: $r15 error code (see nvc0_graph.fuc)
-//
-error:
- push $r14
- mov $r14 0x814
- shl b32 $r14 6
- iowr I[$r14 + 0x000] $r15 // CC_SCRATCH[5] = error code
- mov $r14 0xc1c
- shl b32 $r14 6
- mov $r15 1
- iowr I[$r14 + 0x000] $r15 // INTR_UP_SET
- pop $r14
- ret
-
-// HUB fuc initialisation, executed by triggering ucode start, will
-// fall through to main loop after completion.
-//
-// Input:
-// CC_SCRATCH[0]: chipset (PMC_BOOT_0 read returns 0x0bad0bad... sigh)
-//
-// Output:
-// CC_SCRATCH[0]:
-// 31:31: set to signal completion
-// CC_SCRATCH[1]:
-// 31:0: total PGRAPH context size
-//
-init:
- clear b32 $r0
- mov $sp $r0
- mov $xdbase $r0
-
- // enable fifo access
- mov $r1 0x1200
- mov $r2 2
- iowr I[$r1 + 0x000] $r2 // FIFO_ENABLE
-
- // setup i0 handler, and route all interrupts to it
- mov $r1 #ih
- mov $iv0 $r1
- mov $r1 0x400
- iowr I[$r1 + 0x300] $r0 // INTR_DISPATCH
-
- // route HUB_CHANNEL_SWITCH to fuc interrupt 8
- mov $r3 0x404
- shl b32 $r3 6
- mov $r2 0x2003 // { HUB_CHANNEL_SWITCH, ZERO } -> intr 8
- iowr I[$r3 + 0x000] $r2
-
- // not sure what these are, route them because NVIDIA does, and
- // the IRQ handler will signal the host if we ever get one.. we
- // may find out if/why we need to handle these if so..
- //
- mov $r2 0x2004
- iowr I[$r3 + 0x004] $r2 // { 0x04, ZERO } -> intr 9
- mov $r2 0x200b
- iowr I[$r3 + 0x008] $r2 // { 0x0b, ZERO } -> intr 10
- mov $r2 0x200c
- iowr I[$r3 + 0x01c] $r2 // { 0x0c, ZERO } -> intr 15
-
- // enable all INTR_UP interrupts
- mov $r2 0xc24
- shl b32 $r2 6
- not b32 $r3 $r0
- iowr I[$r2] $r3
-
- // enable fifo, ctxsw, 9, 10, 15 interrupts
- mov $r2 -0x78fc // 0x8704
- sethi $r2 0
- iowr I[$r1 + 0x000] $r2 // INTR_EN_SET
-
- // fifo level triggered, rest edge
- sub b32 $r1 0x100
- mov $r2 4
- iowr I[$r1] $r2
-
- // enable interrupts
- bset $flags ie0
-
- // fetch enabled GPC/ROP counts
- mov $r14 -0x69fc // 0x409604
- sethi $r14 0x400000
- call #nv_rd32
- extr $r1 $r15 16:20
- st b32 D[$r0 + #rop_count] $r1
- and $r15 0x1f
- st b32 D[$r0 + #gpc_count] $r15
-
- // set BAR_REQMASK to GPC mask
- mov $r1 1
- shl b32 $r1 $r15
- sub b32 $r1 1
- mov $r2 0x40c
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r1
- iowr I[$r2 + 0x100] $r1
-
- // find context data for this chipset
- mov $r2 0x800
- shl b32 $r2 6
- iord $r2 I[$r2 + 0x000] // CC_SCRATCH[0]
- mov $r15 #chipsets - 8
- init_find_chipset:
- add b32 $r15 8
- ld b32 $r3 D[$r15 + 0x00]
- cmpu b32 $r3 $r2
- bra e #init_context
- cmpu b32 $r3 0
- bra ne #init_find_chipset
- // unknown chipset
- ret
-
- // context size calculation, reserve first 256 bytes for use by fuc
- init_context:
- mov $r1 256
-
- // calculate size of mmio context data
- ld b16 $r14 D[$r15 + 4]
- ld b16 $r15 D[$r15 + 6]
- sethi $r14 0
- st b32 D[$r0 + #hub_mmio_list_head] $r14
- st b32 D[$r0 + #hub_mmio_list_tail] $r15
- call #mmctx_size
-
- // set mmctx base addresses now so we don't have to do it later,
- // they don't (currently) ever change
- mov $r3 0x700
- shl b32 $r3 6
- shr b32 $r4 $r1 8
- iowr I[$r3 + 0x000] $r4 // MMCTX_SAVE_SWBASE
- iowr I[$r3 + 0x100] $r4 // MMCTX_LOAD_SWBASE
- add b32 $r3 0x1300
- add b32 $r1 $r15
- shr b32 $r15 2
- iowr I[$r3 + 0x000] $r15 // MMCTX_LOAD_COUNT, wtf for?!?
-
- // strands, base offset needs to be aligned to 256 bytes
- shr b32 $r1 8
- add b32 $r1 1
- shl b32 $r1 8
- mov b32 $r15 $r1
- call #strand_ctx_init
- add b32 $r1 $r15
-
- // initialise each GPC in sequence by passing in the offset of its
- // context data in GPCn_CC_SCRATCH[1], and starting its FUC (which
- // has previously been uploaded by the host) running.
- //
- // the GPC fuc init sequence will set GPCn_CC_SCRATCH[0] bit 31
- // when it has completed, and return the size of its context data
- // in GPCn_CC_SCRATCH[1]
- //
- ld b32 $r3 D[$r0 + #gpc_count]
- mov $r4 0x2000
- sethi $r4 0x500000
- init_gpc:
- // setup, and start GPC ucode running
- add b32 $r14 $r4 0x804
- mov b32 $r15 $r1
- call #nv_wr32 // CC_SCRATCH[1] = ctx offset
- add b32 $r14 $r4 0x800
- mov b32 $r15 $r2
- call #nv_wr32 // CC_SCRATCH[0] = chipset
- add b32 $r14 $r4 0x10c
- clear b32 $r15
- call #nv_wr32
- add b32 $r14 $r4 0x104
- call #nv_wr32 // ENTRY
- add b32 $r14 $r4 0x100
- mov $r15 2 // CTRL_START_TRIGGER
- call #nv_wr32 // CTRL
-
- // wait for it to complete, and adjust context size
- add b32 $r14 $r4 0x800
- init_gpc_wait:
- call #nv_rd32
- xbit $r15 $r15 31
- bra e #init_gpc_wait
- add b32 $r14 $r4 0x804
- call #nv_rd32
- add b32 $r1 $r15
-
- // next!
- add b32 $r4 0x8000
- sub b32 $r3 1
- bra ne #init_gpc
-
- // save context size, and tell host we're ready
- mov $r2 0x800
- shl b32 $r2 6
- iowr I[$r2 + 0x100] $r1 // CC_SCRATCH[1] = context size
- add b32 $r2 0x800
- clear b32 $r1
- bset $r1 31
- iowr I[$r2 + 0x000] $r1 // CC_SCRATCH[0] |= 0x80000000
-
-// Main program loop, very simple, sleeps until woken up by the interrupt
-// handler, pulls a command from the queue and executes its handler
-//
-main:
- // sleep until we have something to do
- bset $flags $p0
- sleep $p0
- mov $r13 #cmd_queue
- call #queue_get
- bra $p1 #main
-
- // context switch, requested by GPU?
- cmpu b32 $r14 0x4001
- bra ne #main_not_ctx_switch
- trace_set(T_AUTO)
- mov $r1 0xb00
- shl b32 $r1 6
- iord $r2 I[$r1 + 0x100] // CHAN_NEXT
- iord $r1 I[$r1 + 0x000] // CHAN_CUR
-
- xbit $r3 $r1 31
- bra e #chsw_no_prev
- xbit $r3 $r2 31
- bra e #chsw_prev_no_next
- push $r2
- mov b32 $r2 $r1
- trace_set(T_SAVE)
- bclr $flags $p1
- bset $flags $p2
- call #ctx_xfer
- trace_clr(T_SAVE);
- pop $r2
- trace_set(T_LOAD);
- bset $flags $p1
- call #ctx_xfer
- trace_clr(T_LOAD);
- bra #chsw_done
- chsw_prev_no_next:
- push $r2
- mov b32 $r2 $r1
- bclr $flags $p1
- bclr $flags $p2
- call #ctx_xfer
- pop $r2
- mov $r1 0xb00
- shl b32 $r1 6
- iowr I[$r1] $r2
- bra #chsw_done
- chsw_no_prev:
- xbit $r3 $r2 31
- bra e #chsw_done
- bset $flags $p1
- bclr $flags $p2
- call #ctx_xfer
-
- // ack the context switch request
- chsw_done:
- mov $r1 0xb0c
- shl b32 $r1 6
- mov $r2 1
- iowr I[$r1 + 0x000] $r2 // 0x409b0c
- trace_clr(T_AUTO)
- bra #main
-
- // request to set current channel? (*not* a context switch)
- main_not_ctx_switch:
- cmpu b32 $r14 0x0001
- bra ne #main_not_ctx_chan
- mov b32 $r2 $r15
- call #ctx_chan
- bra #main_done
-
- // request to store current channel context?
- main_not_ctx_chan:
- cmpu b32 $r14 0x0002
- bra ne #main_not_ctx_save
- trace_set(T_SAVE)
- bclr $flags $p1
- bclr $flags $p2
- call #ctx_xfer
- trace_clr(T_SAVE)
- bra #main_done
-
- main_not_ctx_save:
- shl b32 $r15 $r14 16
- or $r15 E_BAD_COMMAND
- call #error
- bra #main
-
- main_done:
- mov $r1 0x820
- shl b32 $r1 6
- clear b32 $r2
- bset $r2 31
- iowr I[$r1 + 0x000] $r2 // CC_SCRATCH[0] |= 0x80000000
- bra #main
-
-// interrupt handler
-ih:
- push $r8
- mov $r8 $flags
- push $r8
- push $r9
- push $r10
- push $r11
- push $r13
- push $r14
- push $r15
-
- // incoming fifo command?
- iord $r10 I[$r0 + 0x200] // INTR
- and $r11 $r10 0x00000004
- bra e #ih_no_fifo
- // queue incoming fifo command for later processing
- mov $r11 0x1900
- mov $r13 #cmd_queue
- iord $r14 I[$r11 + 0x100] // FIFO_CMD
- iord $r15 I[$r11 + 0x000] // FIFO_DATA
- call #queue_put
- add b32 $r11 0x400
- mov $r14 1
- iowr I[$r11 + 0x000] $r14 // FIFO_ACK
-
- // context switch request?
- ih_no_fifo:
- and $r11 $r10 0x00000100
- bra e #ih_no_ctxsw
- // enqueue a context switch for later processing
- mov $r13 #cmd_queue
- mov $r14 0x4001
- call #queue_put
-
- // anything we didn't handle, bring it to the host's attention
- ih_no_ctxsw:
- mov $r11 0x104
- not b32 $r11
- and $r11 $r10 $r11
- bra e #ih_no_other
- mov $r10 0xc1c
- shl b32 $r10 6
- iowr I[$r10] $r11 // INTR_UP_SET
-
- // ack, and wake up main()
- ih_no_other:
- iowr I[$r0 + 0x100] $r10 // INTR_ACK
-
- pop $r15
- pop $r14
- pop $r13
- pop $r11
- pop $r10
- pop $r9
- pop $r8
- mov $flags $r8
- pop $r8
- bclr $flags $p0
- iret
-
-// Not really sure, but MEM_CMD 7 will hang forever if this isn't done
-ctx_4160s:
- mov $r14 0x4160
- sethi $r14 0x400000
- mov $r15 1
- call #nv_wr32
- ctx_4160s_wait:
- call #nv_rd32
- xbit $r15 $r15 4
- bra e #ctx_4160s_wait
- ret
-
-// Without clearing again at end of xfer, some things cause PGRAPH
-// to hang with STATUS=0x00000007 until it's cleared.. fbcon can
-// still function with it set however...
-ctx_4160c:
- mov $r14 0x4160
- sethi $r14 0x400000
- clear b32 $r15
- call #nv_wr32
- ret
-
-// Again, not real sure
-//
-// In: $r15 value to set 0x404170 to
-//
-ctx_4170s:
- mov $r14 0x4170
- sethi $r14 0x400000
- or $r15 0x10
- call #nv_wr32
- ret
-
-// Waits for a ctx_4170s() call to complete
-//
-ctx_4170w:
- mov $r14 0x4170
- sethi $r14 0x400000
- call #nv_rd32
- and $r15 0x10
- bra ne #ctx_4170w
- ret
-
-// Disables various things, waits a bit, and re-enables them..
-//
-// Not sure how exactly this helps, perhaps "ENABLE" is not such a
-// good description for the bits we turn off? Anyways, without this,
-// funny things happen.
-//
-ctx_redswitch:
- mov $r14 0x614
- shl b32 $r14 6
- mov $r15 0x270
- iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_GPC, POWER_ALL
- mov $r15 8
- ctx_redswitch_delay:
- sub b32 $r15 1
- bra ne #ctx_redswitch_delay
- mov $r15 0x770
- iowr I[$r14] $r15 // HUB_RED_SWITCH = ENABLE_ALL, POWER_ALL
- ret
-
-// Not a clue what this is for, except that unless the value is 0x10, the
-// strand context is saved (and presumably restored) incorrectly..
-//
-// In: $r15 value to set to (0x00/0x10 are used)
-//
-ctx_86c:
- mov $r14 0x86c
- shl b32 $r14 6
- iowr I[$r14] $r15 // HUB(0x86c) = val
- mov $r14 -0x75ec
- sethi $r14 0x400000
- call #nv_wr32 // ROP(0xa14) = val
- mov $r14 -0x5794
- sethi $r14 0x410000
- call #nv_wr32 // GPC(0x86c) = val
- ret
-
-// ctx_load - loads a channel's ctxctl data, and selects its vm
-//
-// In: $r2 channel address
-//
-ctx_load:
- trace_set(T_CHAN)
-
- // switch to channel, somewhat magic in parts..
- mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa24
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r0 // 0x409a24
- mov $r3 0xb00
- shl b32 $r3 6
- iowr I[$r3 + 0x100] $r2 // CHAN_NEXT
- mov $r1 0xa0c
- shl b32 $r1 6
- mov $r4 7
- iowr I[$r1 + 0x000] $r2 // MEM_CHAN
- iowr I[$r1 + 0x100] $r4 // MEM_CMD
- ctx_chan_wait_0:
- iord $r4 I[$r1 + 0x100]
- and $r4 0x1f
- bra ne #ctx_chan_wait_0
- iowr I[$r3 + 0x000] $r2 // CHAN_CUR
-
- // load channel header, fetch PGRAPH context pointer
- mov $xtargets $r0
- bclr $r2 31
- shl b32 $r2 4
- add b32 $r2 2
-
- trace_set(T_LCHAN)
- mov $r1 0xa04
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r2 // MEM_BASE
- mov $r1 0xa20
- shl b32 $r1 6
- mov $r2 0x0002
- sethi $r2 0x80000000
- iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vram
- mov $r1 0x10 // chan + 0x0210
- mov $r2 #xfer_data
- sethi $r2 0x00020000 // 16 bytes
- xdld $r1 $r2
- xdwait
- trace_clr(T_LCHAN)
-
- // update current context
- ld b32 $r1 D[$r0 + #xfer_data + 4]
- shl b32 $r1 24
- ld b32 $r2 D[$r0 + #xfer_data + 0]
- shr b32 $r2 8
- or $r1 $r2
- st b32 D[$r0 + #ctx_current] $r1
-
- // set transfer base to start of context, and fetch context header
- trace_set(T_LCTXH)
- mov $r2 0xa04
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r1 // MEM_BASE
- mov $r2 1
- mov $r1 0xa20
- shl b32 $r1 6
- iowr I[$r1 + 0x000] $r2 // MEM_TARGET = vm
- mov $r1 #chan_data
- sethi $r1 0x00060000 // 256 bytes
- xdld $r0 $r1
- xdwait
- trace_clr(T_LCTXH)
-
- trace_clr(T_CHAN)
- ret
-
-// ctx_chan - handler for HUB_SET_CHAN command, will set a channel as
-// the active channel for ctxctl, but not actually transfer
-// any context data. intended for use only during initial
-// context construction.
-//
-// In: $r2 channel address
-//
-ctx_chan:
- call #ctx_4160s
- call #ctx_load
- mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa10
- shl b32 $r1 6
- mov $r2 5
- iowr I[$r1 + 0x000] $r2 // MEM_CMD = 5 (???)
- ctx_chan_wait:
- iord $r2 I[$r1 + 0x000]
- or $r2 $r2
- bra ne #ctx_chan_wait
- call #ctx_4160c
- ret
-
-// Execute per-context state overrides list
-//
-// Only executed on the first load of a channel. Might want to look into
-// removing this and having the host directly modify the channel's context
-// to change this state... The nouveau DRM already builds this list as
-// it's definitely needed for NVIDIA's, so we may as well use it for now
-//
-// Input: $r1 mmio list length
-//
-ctx_mmio_exec:
- // set transfer base to be the mmio list
- ld b32 $r3 D[$r0 + #chan_mmio_address]
- mov $r2 0xa04
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r3 // MEM_BASE
-
- clear b32 $r3
- ctx_mmio_loop:
- // fetch next 256 bytes of mmio list if necessary
- and $r4 $r3 0xff
- bra ne #ctx_mmio_pull
- mov $r5 #xfer_data
- sethi $r5 0x00060000 // 256 bytes
- xdld $r3 $r5
- xdwait
-
- // execute a single list entry
- ctx_mmio_pull:
- ld b32 $r14 D[$r4 + #xfer_data + 0x00]
- ld b32 $r15 D[$r4 + #xfer_data + 0x04]
- call #nv_wr32
-
- // next!
- add b32 $r3 8
- sub b32 $r1 1
- bra ne #ctx_mmio_loop
-
- // set transfer base back to the current context
- ctx_mmio_done:
- ld b32 $r3 D[$r0 + #ctx_current]
- iowr I[$r2 + 0x000] $r3 // MEM_BASE
-
- // disable the mmio list now, we don't need/want to execute it again
- st b32 D[$r0 + #chan_mmio_count] $r0
- mov $r1 #chan_data
- sethi $r1 0x00060000 // 256 bytes
- xdst $r0 $r1
- xdwait
- ret
-
-// Transfer HUB context data between GPU and storage area
-//
-// In: $r2 channel address
-// $p1 clear on save, set on load
-// $p2 set if opposite direction done/will be done, so:
-// on save it means: "a load will follow this save"
-// on load it means: "a save preceded this load"
-//
-ctx_xfer:
- bra not $p1 #ctx_xfer_pre
- bra $p2 #ctx_xfer_pre_load
- ctx_xfer_pre:
- mov $r15 0x10
- call #ctx_86c
- call #ctx_4160s
- bra not $p1 #ctx_xfer_exec
-
- ctx_xfer_pre_load:
- mov $r15 2
- call #ctx_4170s
- call #ctx_4170w
- call #ctx_redswitch
- clear b32 $r15
- call #ctx_4170s
- call #ctx_load
-
- // fetch context pointer, and initiate xfer on all GPCs
- ctx_xfer_exec:
- ld b32 $r1 D[$r0 + #ctx_current]
- mov $r2 0x414
- shl b32 $r2 6
- iowr I[$r2 + 0x000] $r0 // BAR_STATUS = reset
- mov $r14 -0x5b00
- sethi $r14 0x410000
- mov b32 $r15 $r1
- call #nv_wr32 // GPC_BCAST_WRCMD_DATA = ctx pointer
- add b32 $r14 4
- xbit $r15 $flags $p1
- xbit $r2 $flags $p2
- shl b32 $r2 1
- or $r15 $r2
- call #nv_wr32 // GPC_BCAST_WRCMD_CMD = GPC_XFER(type)
-
- // strands
- mov $r1 0x4afc
- sethi $r1 0x20000
- mov $r2 0xc
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x0c
- call #strand_wait
- mov $r2 0x47fc
- sethi $r2 0x20000
- iowr I[$r2] $r0 // STRAND_FIRST_GENE(0x3f) = 0x00
- xbit $r2 $flags $p1
- add b32 $r2 3
- iowr I[$r1] $r2 // STRAND_CMD(0x3f) = 0x03/0x04 (SAVE/LOAD)
-
- // mmio context
- xbit $r10 $flags $p1 // direction
- or $r10 6 // first, last
- mov $r11 0 // base = 0
- ld b32 $r12 D[$r0 + #hub_mmio_list_head]
- ld b32 $r13 D[$r0 + #hub_mmio_list_tail]
- mov $r14 0 // not multi
- call #mmctx_xfer
-
- // wait for GPCs to all complete
- mov $r10 8 // DONE_BAR
- call #wait_doneo
-
- // wait for strand xfer to complete
- call #strand_wait
-
- // post-op
- bra $p1 #ctx_xfer_post
- mov $r10 12 // DONE_UNK12
- call #wait_donez
- mov $r1 0xa10
- shl b32 $r1 6
- mov $r2 5
- iowr I[$r1] $r2 // MEM_CMD
- ctx_xfer_post_save_wait:
- iord $r2 I[$r1]
- or $r2 $r2
- bra ne #ctx_xfer_post_save_wait
-
- bra $p2 #ctx_xfer_done
- ctx_xfer_post:
- mov $r15 2
- call #ctx_4170s
- clear b32 $r15
- call #ctx_86c
- call #strand_post
- call #ctx_4170w
- clear b32 $r15
- call #ctx_4170s
-
- bra not $p1 #ctx_xfer_no_post_mmio
- ld b32 $r1 D[$r0 + #chan_mmio_count]
- or $r1 $r1
- bra e #ctx_xfer_no_post_mmio
- call #ctx_mmio_exec
-
- ctx_xfer_no_post_mmio:
- call #ctx_4160c
-
- ctx_xfer_done:
- ret
-
-.align 256
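Note: the HUB main loop above recognises a small set of queued commands: 0x0001 selects a channel without transferring context (ctx_chan), 0x0002 saves the current channel's context, 0x4001 is the context-switch request queued by the interrupt handler, and anything else is reported back through error() with the offending command in the upper 16 bits. A compact, self-contained C sketch of that dispatch follows; the printfs stand in for the real handlers, and the E_BAD_COMMAND value is an assumption (it is defined in nvc0_graph.fuc, not shown here).

#include <stdint.h>
#include <stdio.h>

/* Assumed to match the E_BAD_COMMAND definition in nvc0_graph.fuc. */
#define E_BAD_COMMAND 0x00000001

/* One iteration of the main loop: cmd/data are what queue_get returns. */
static void hub_dispatch(uint32_t cmd, uint32_t data)
{
	switch (cmd) {
	case 0x0001:	/* set current channel, no context transfer */
		printf("ctx_chan(0x%08x)\n", data);
		break;
	case 0x0002:	/* store current channel context */
		printf("save: ctx_xfer with $p1/$p2 clear\n");
		break;
	case 0x4001:	/* context switch queued by the irq handler */
		printf("channel switch\n");
		break;
	default:	/* unknown command: report to the host */
		printf("error(0x%08x)\n", (cmd << 16) | E_BAD_COMMAND);
		break;
	}
}

int main(void)
{
	hub_dispatch(0x0001, 0x00c00000);	/* hypothetical channel address */
	hub_dispatch(0x1234, 0);		/* -> error(0x12340001) */
	return 0;
}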
+++ /dev/null
-uint32_t nvc0_grhub_data[] = {
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x000000c0,
- 0x013c00a0,
- 0x000000c1,
- 0x014000a0,
- 0x000000c3,
- 0x013c00a0,
- 0x000000c4,
- 0x013c00a0,
- 0x000000c8,
- 0x013c00a0,
- 0x000000ce,
- 0x013c00a0,
- 0x000000cf,
- 0x013c00a0,
- 0x000000d9,
- 0x01dc0140,
- 0x00000000,
- 0x0417e91c,
- 0x04400204,
- 0x28404004,
- 0x00404044,
- 0x34404094,
- 0x184040d0,
- 0x004040f8,
- 0x08404130,
- 0x08404150,
- 0x04404164,
- 0x08404174,
- 0x1c404200,
- 0x34404404,
- 0x0c404460,
- 0x00404480,
- 0x00404498,
- 0x0c404604,
- 0x7c404618,
- 0x50404698,
- 0x044046f0,
- 0x54404700,
- 0x00405800,
- 0x08405830,
- 0x00405854,
- 0x0c405870,
- 0x04405a00,
- 0x00405a18,
- 0x00406020,
- 0x0c406028,
- 0x044064a8,
- 0x044064b4,
- 0x00407804,
- 0x1440780c,
- 0x004078bc,
- 0x18408000,
- 0x00408064,
- 0x08408800,
- 0x0c408900,
- 0x00408980,
- 0x044064c0,
- 0x0417e91c,
- 0x04400204,
- 0x24404004,
- 0x00404044,
- 0x34404094,
- 0x184040d0,
- 0x004040f8,
- 0x08404130,
- 0x08404150,
- 0x04404164,
- 0x04404178,
- 0x1c404200,
- 0x34404404,
- 0x0c404460,
- 0x00404480,
- 0x00404498,
- 0x0c404604,
- 0x7c404618,
- 0x50404698,
- 0x044046f0,
- 0x54404700,
- 0x00405800,
- 0x08405830,
- 0x00405854,
- 0x0c405870,
- 0x04405a00,
- 0x00405a18,
- 0x00406020,
- 0x0c406028,
- 0x044064a8,
- 0x104064b4,
- 0x00407804,
- 0x1440780c,
- 0x004078bc,
- 0x18408000,
- 0x00408064,
- 0x08408800,
- 0x0c408900,
- 0x00408980,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
-
-uint32_t nvc0_grhub_code[] = {
- 0x03090ef5,
- 0x9800d898,
- 0x86f001d9,
- 0x0489b808,
- 0xf00c1bf4,
- 0x21f502f7,
- 0x00f802ec,
- 0xb60798c4,
- 0x8dbb0384,
- 0x0880b600,
- 0x80008e80,
- 0x90b6018f,
- 0x0f94f001,
- 0xf801d980,
- 0x0131f400,
- 0x9800d898,
- 0x89b801d9,
- 0x210bf404,
- 0xb60789c4,
- 0x9dbb0394,
- 0x0890b600,
- 0x98009e98,
- 0x80b6019f,
- 0x0f84f001,
- 0xf400d880,
- 0x00f80132,
- 0x0728b7f1,
- 0xb906b4b6,
- 0xc9f002ec,
- 0x00bcd01f,
- 0xc800bccf,
- 0x1bf41fcc,
- 0x06a7f0fa,
- 0x010321f5,
- 0xf840bfcf,
- 0x28b7f100,
- 0x06b4b607,
- 0xb980bfd0,
- 0xc9f002ec,
- 0x1ec9f01f,
- 0xcf00bcd0,
- 0xccc800bc,
- 0xfa1bf41f,
- 0x87f100f8,
- 0x84b60430,
- 0x1ff9f006,
- 0xf8008fd0,
- 0x3087f100,
- 0x0684b604,
- 0xf80080d0,
- 0x3c87f100,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d000,
- 0x081887f1,
- 0xd00684b6,
- 0x87f1008a,
- 0x84b60400,
- 0x0088cf06,
- 0xf4888aff,
- 0x87f1f31b,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00099,
- 0xf100f800,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00099f0,
- 0x87f10089,
- 0x84b60818,
- 0x008ad006,
- 0x040087f1,
- 0xcf0684b6,
- 0x8aff0088,
- 0xf30bf488,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0099f094,
- 0xf80089d0,
- 0x9894bd00,
- 0x85b600e8,
- 0x0180b61a,
- 0xbb0284b6,
- 0xe0b60098,
- 0x04efb804,
- 0xb9eb1bf4,
- 0x00f8029f,
- 0x083c87f1,
- 0xbd0684b6,
- 0x0199f094,
- 0xf10089d0,
- 0xb6071087,
- 0x94bd0684,
- 0xf405bbfd,
- 0x8bd0090b,
- 0x0099f000,
- 0xf405eefd,
- 0x8ed00c0b,
- 0xc08fd080,
- 0xb70199f0,
- 0xc8010080,
- 0xb4b600ab,
- 0x0cb9f010,
- 0xb601aec8,
- 0xbefd11e4,
- 0x008bd005,
- 0xf0008ecf,
- 0x0bf41fe4,
- 0x00ce98fa,
- 0xd005e9fd,
- 0xc0b6c08e,
- 0x04cdb804,
- 0xc8e81bf4,
- 0x1bf402ab,
- 0x008bcf18,
- 0xb01fb4f0,
- 0x1bf410b4,
- 0x02a7f0f7,
- 0xf4c921f4,
- 0xabc81b0e,
- 0x10b4b600,
- 0xf00cb9f0,
- 0x8bd012b9,
- 0x008bcf00,
- 0xf412bbc8,
- 0x87f1fa1b,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00199,
- 0xf900f800,
- 0x02a7f0a0,
- 0xfcc921f4,
- 0xf100f8a0,
- 0xf04afc87,
- 0x97f00283,
- 0x0089d00c,
- 0x020721f5,
- 0x87f100f8,
- 0x83f04afc,
- 0x0d97f002,
- 0xf50089d0,
- 0xf8020721,
- 0xfca7f100,
- 0x02a3f04f,
- 0x0500aba2,
- 0xd00fc7f0,
- 0xc7f000ac,
- 0x00bcd00b,
- 0x020721f5,
- 0xf000aed0,
- 0xbcd00ac7,
- 0x0721f500,
- 0xf100f802,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00399f0,
- 0x21f50089,
- 0xe7f00213,
- 0x3921f503,
- 0xfca7f102,
- 0x02a3f046,
- 0x0400aba0,
- 0xf040a0d0,
- 0xbcd001c7,
- 0x0721f500,
- 0x010c9202,
- 0xf000acd0,
- 0xbcd002c7,
- 0x0721f500,
- 0x2621f502,
- 0x8087f102,
- 0x0684b608,
- 0xb70089cf,
- 0x95220080,
- 0x8ed008fe,
- 0x408ed000,
- 0xb6808acf,
- 0xa0b606a5,
- 0x00eabb01,
- 0xb60480b6,
- 0x1bf40192,
- 0x08e4b6e8,
- 0xf1f2efbc,
- 0xb6085c87,
- 0x94bd0684,
- 0xd00399f0,
- 0x00f80089,
- 0xe7f1e0f9,
- 0xe4b60814,
- 0x00efd006,
- 0x0c1ce7f1,
- 0xf006e4b6,
- 0xefd001f7,
- 0xf8e0fc00,
- 0xfe04bd00,
- 0x07fe0004,
- 0x0017f100,
- 0x0227f012,
- 0xf10012d0,
- 0xfe05b917,
- 0x17f10010,
- 0x10d00400,
- 0x0437f1c0,
- 0x0634b604,
- 0x200327f1,
- 0xf10032d0,
- 0xd0200427,
- 0x27f10132,
- 0x32d0200b,
- 0x0c27f102,
- 0x0732d020,
- 0x0c2427f1,
- 0xb90624b6,
- 0x23d00003,
- 0x0427f100,
- 0x0023f087,
- 0xb70012d0,
- 0xf0010012,
- 0x12d00427,
- 0x1031f400,
- 0x9604e7f1,
- 0xf440e3f0,
- 0xf1c76821,
- 0x01018090,
- 0x801ff4f0,
- 0x17f0000f,
- 0x041fbb01,
- 0xf10112b6,
- 0xb6040c27,
- 0x21d00624,
- 0x4021d000,
- 0x080027f1,
- 0xcf0624b6,
- 0xf7f00022,
- 0x08f0b654,
- 0xb800f398,
- 0x0bf40432,
- 0x0034b00b,
- 0xf8f11bf4,
- 0x0017f100,
- 0x02fe5801,
- 0xf003ff58,
- 0x0e8000e3,
- 0x150f8014,
- 0x013d21f5,
- 0x070037f1,
- 0x950634b6,
- 0x34d00814,
- 0x4034d000,
- 0x130030b7,
- 0xb6001fbb,
- 0x3fd002f5,
- 0x0815b600,
- 0xb60110b6,
- 0x1fb90814,
- 0x6321f502,
- 0x001fbb02,
- 0xf1000398,
- 0xf0200047,
- 0x4ea05043,
- 0x1fb90804,
- 0x8d21f402,
- 0x08004ea0,
- 0xf4022fb9,
- 0x4ea08d21,
- 0xf4bd010c,
- 0xa08d21f4,
- 0xf401044e,
- 0x4ea08d21,
- 0xf7f00100,
- 0x8d21f402,
- 0x08004ea0,
- 0xc86821f4,
- 0x0bf41fff,
- 0x044ea0fa,
- 0x6821f408,
- 0xb7001fbb,
- 0xb6800040,
- 0x1bf40132,
- 0x0027f1b4,
- 0x0624b608,
- 0xb74021d0,
- 0xbd080020,
- 0x1f19f014,
- 0xf40021d0,
- 0x28f40031,
- 0x08d7f000,
- 0xf43921f4,
- 0xe4b1f401,
- 0x1bf54001,
- 0x87f100d1,
- 0x84b6083c,
- 0xf094bd06,
- 0x89d00499,
- 0x0017f100,
- 0x0614b60b,
- 0xcf4012cf,
- 0x13c80011,
- 0x7e0bf41f,
- 0xf41f23c8,
- 0x20f95a0b,
- 0xf10212b9,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00799f0,
- 0x32f40089,
- 0x0231f401,
- 0x082921f5,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0799f094,
- 0xfc0089d0,
- 0x3c87f120,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d006,
- 0xf50131f4,
- 0xf1082921,
- 0xb6085c87,
- 0x94bd0684,
- 0xd00699f0,
- 0x0ef40089,
- 0xb920f931,
- 0x32f40212,
- 0x0232f401,
- 0x082921f5,
- 0x17f120fc,
- 0x14b60b00,
- 0x0012d006,
- 0xc8130ef4,
- 0x0bf41f23,
- 0x0131f40d,
- 0xf50232f4,
- 0xf1082921,
- 0xb60b0c17,
- 0x27f00614,
- 0x0012d001,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0499f094,
- 0xf50089d0,
- 0xb0ff200e,
- 0x1bf401e4,
- 0x02f2b90d,
- 0x07b521f5,
- 0xb0420ef4,
- 0x1bf402e4,
- 0x3c87f12e,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d007,
- 0xf40132f4,
- 0x21f50232,
- 0x87f10829,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00799,
- 0x110ef400,
- 0xf010ef94,
- 0x21f501f5,
- 0x0ef502ec,
- 0x17f1fed1,
- 0x14b60820,
- 0xf024bd06,
- 0x12d01f29,
- 0xbe0ef500,
- 0xfe80f9fe,
- 0x80f90188,
- 0xa0f990f9,
- 0xd0f9b0f9,
- 0xf0f9e0f9,
- 0xc4800acf,
- 0x0bf404ab,
- 0x00b7f11d,
- 0x08d7f019,
- 0xcf40becf,
- 0x21f400bf,
- 0x00b0b704,
- 0x01e7f004,
- 0xe400bed0,
- 0xf40100ab,
- 0xd7f00d0b,
- 0x01e7f108,
- 0x0421f440,
- 0x0104b7f1,
- 0xabffb0bd,
- 0x0d0bf4b4,
- 0x0c1ca7f1,
- 0xd006a4b6,
- 0x0ad000ab,
- 0xfcf0fc40,
- 0xfcd0fce0,
- 0xfca0fcb0,
- 0xfe80fc90,
- 0x80fc0088,
- 0xf80032f4,
- 0x60e7f101,
- 0x40e3f041,
- 0xf401f7f0,
- 0x21f48d21,
- 0x04ffc868,
- 0xf8fa0bf4,
- 0x60e7f100,
- 0x40e3f041,
- 0x21f4f4bd,
- 0xf100f88d,
- 0xf04170e7,
- 0xf5f040e3,
- 0x8d21f410,
- 0xe7f100f8,
- 0xe3f04170,
- 0x6821f440,
- 0xf410f4f0,
- 0x00f8f31b,
- 0x0614e7f1,
- 0xf106e4b6,
- 0xd00270f7,
- 0xf7f000ef,
- 0x01f2b608,
- 0xf1fd1bf4,
- 0xd00770f7,
- 0x00f800ef,
- 0x086ce7f1,
- 0xd006e4b6,
- 0xe7f100ef,
- 0xe3f08a14,
- 0x8d21f440,
- 0xa86ce7f1,
- 0xf441e3f0,
- 0x00f88d21,
- 0x083c87f1,
- 0xbd0684b6,
- 0x0599f094,
- 0xf00089d0,
- 0x21f40ca7,
- 0x2417f1c9,
- 0x0614b60a,
- 0xf10010d0,
- 0xb60b0037,
- 0x32d00634,
- 0x0c17f140,
- 0x0614b60a,
- 0xd00747f0,
- 0x14d00012,
- 0x4014cf40,
- 0xf41f44f0,
- 0x32d0fa1b,
- 0x000bfe00,
- 0xb61f2af0,
- 0x20b60424,
- 0x3c87f102,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d008,
- 0x0a0417f1,
- 0xd00614b6,
- 0x17f10012,
- 0x14b60a20,
- 0x0227f006,
- 0x800023f1,
- 0xf00012d0,
- 0x27f11017,
- 0x23f00300,
- 0x0512fa02,
- 0x87f103f8,
- 0x84b6085c,
- 0xf094bd06,
- 0x89d00899,
- 0xc1019800,
- 0x981814b6,
- 0x25b6c002,
- 0x0512fd08,
- 0xf1160180,
- 0xb6083c87,
- 0x94bd0684,
- 0xd00999f0,
- 0x27f10089,
- 0x24b60a04,
- 0x0021d006,
- 0xf10127f0,
- 0xb60a2017,
- 0x12d00614,
- 0x0017f100,
- 0x0613f002,
- 0xf80501fa,
- 0x5c87f103,
- 0x0684b608,
- 0x99f094bd,
- 0x0089d009,
- 0x085c87f1,
- 0xbd0684b6,
- 0x0599f094,
- 0xf80089d0,
- 0x3121f500,
- 0xb821f506,
- 0x0ca7f006,
- 0xf1c921f4,
- 0xb60a1017,
- 0x27f00614,
- 0x0012d005,
- 0xfd0012cf,
- 0x1bf40522,
- 0x4921f5fa,
- 0x9800f806,
- 0x27f18103,
- 0x24b60a04,
- 0x0023d006,
- 0x34c434bd,
- 0x0f1bf4ff,
- 0x030057f1,
- 0xfa0653f0,
- 0x03f80535,
- 0x98c04e98,
- 0x21f4c14f,
- 0x0830b68d,
- 0xf40112b6,
- 0x0398df1b,
- 0x0023d016,
- 0xf1800080,
- 0xf0020017,
- 0x01fa0613,
- 0xf803f806,
- 0x0611f400,
- 0xf01102f4,
- 0x21f510f7,
- 0x21f50698,
- 0x11f40631,
- 0x02f7f01c,
- 0x065721f5,
- 0x066621f5,
- 0x067821f5,
- 0x21f5f4bd,
- 0x21f50657,
- 0x019806b8,
- 0x1427f116,
- 0x0624b604,
- 0xf10020d0,
- 0xf0a500e7,
- 0x1fb941e3,
- 0x8d21f402,
- 0xf004e0b6,
- 0x2cf001fc,
- 0x0124b602,
- 0xf405f2fd,
- 0x17f18d21,
- 0x13f04afc,
- 0x0c27f002,
- 0xf50012d0,
- 0xf1020721,
- 0xf047fc27,
- 0x20d00223,
- 0x012cf000,
- 0xd00320b6,
- 0xacf00012,
- 0x06a5f001,
- 0x9800b7f0,
- 0x0d98140c,
- 0x00e7f015,
- 0x015c21f5,
- 0xf508a7f0,
- 0xf5010321,
- 0xf4020721,
- 0xa7f02201,
- 0xc921f40c,
- 0x0a1017f1,
- 0xf00614b6,
- 0x12d00527,
- 0x0012cf00,
- 0xf40522fd,
- 0x02f4fa1b,
- 0x02f7f032,
- 0x065721f5,
- 0x21f5f4bd,
- 0x21f50698,
- 0x21f50226,
- 0xf4bd0666,
- 0x065721f5,
- 0x981011f4,
- 0x11fd8001,
- 0x070bf405,
- 0x07df21f5,
- 0x064921f5,
- 0x000000f8,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
- 0x00000000,
-};
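Note: comparing the mmctx_data() entries in the .fuc sources with the generated arrays above suggests each mmio list entry packs the register count and base address into one word, roughly ((count - 1) << 26) | address; for example mmctx_data(0x404004, 11) from the hub list shows up as 0x28404004 in the data. This is an inference from the dumped data, not something stated in the sources. A minimal sketch of that packing:

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Inferred encoding of one mmctx list entry: the low 26 bits hold the
 * mmio address, the upper bits hold (count - 1). */
static uint32_t mmctx_entry(uint32_t addr, uint32_t count)
{
	return ((count - 1) << 26) | addr;
}

int main(void)
{
	/* mmctx_data(0x404004, 11) from the hub list vs. the generated data */
	assert(mmctx_entry(0x404004, 11) == 0x28404004);
	/* single-register entries keep the address unchanged */
	assert(mmctx_entry(0x405800, 1) == 0x00405800);
	printf("mmctx encoding checks passed\n");
	return 0;
}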
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
-
-struct nvc0_instmem_priv {
- struct nouveau_gpuobj *bar1_pgd;
- struct nouveau_channel *bar1;
- struct nouveau_gpuobj *bar3_pgd;
- struct nouveau_channel *bar3;
-};
-
-int
-nvc0_instmem_suspend(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- dev_priv->ramin_available = false;
- return 0;
-}
-
-void
-nvc0_instmem_resume(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
-
- nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
- nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
- dev_priv->ramin_available = true;
-}
-
-static void
-nvc0_channel_del(struct nouveau_channel **pchan)
-{
- struct nouveau_channel *chan;
-
- chan = *pchan;
- *pchan = NULL;
- if (!chan)
- return;
-
- nouveau_vm_ref(NULL, &chan->vm, NULL);
- if (drm_mm_initialized(&chan->ramin_heap))
- drm_mm_takedown(&chan->ramin_heap);
- nouveau_gpuobj_ref(NULL, &chan->ramin);
- kfree(chan);
-}
-
-static int
-nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
- struct nouveau_channel **pchan,
- struct nouveau_gpuobj *pgd, u64 vm_size)
-{
- struct nouveau_channel *chan;
- int ret;
-
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
- if (!chan)
- return -ENOMEM;
- chan->dev = dev;
-
- ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
- if (ret) {
- nvc0_channel_del(&chan);
- return ret;
- }
-
- ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
- if (ret) {
- nvc0_channel_del(&chan);
- return ret;
- }
-
- ret = nouveau_vm_ref(vm, &chan->vm, NULL);
- if (ret) {
- nvc0_channel_del(&chan);
- return ret;
- }
-
- nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
- nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
- nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
- nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
-
- *pchan = chan;
- return 0;
-}
-
-int
-nvc0_instmem_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct pci_dev *pdev = dev->pdev;
- struct nvc0_instmem_priv *priv;
- struct nouveau_vm *vm = NULL;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
- pinstmem->priv = priv;
-
- /* BAR3 VM */
- ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
- &dev_priv->bar3_vm);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL,
- (pci_resource_len(pdev, 3) >> 12) * 8, 0,
- NVOBJ_FLAG_DONT_MAP |
- NVOBJ_FLAG_ZERO_ALLOC,
- &dev_priv->bar3_vm->pgt[0].obj[0]);
- if (ret)
- goto error;
- dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
-
- nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
- if (ret)
- goto error;
-
- ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
- if (ret)
- goto error;
- nouveau_vm_ref(NULL, &vm, NULL);
-
- ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
- priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
- if (ret)
- goto error;
-
- /* BAR1 VM */
- ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
- if (ret)
- goto error;
-
- ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
- if (ret)
- goto error;
- nouveau_vm_ref(NULL, &vm, NULL);
-
- ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
- priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
- if (ret)
- goto error;
-
- /* channel vm */
- ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL,
- &dev_priv->chan_vm);
- if (ret)
- goto error;
-
- nvc0_instmem_resume(dev);
- return 0;
-error:
- nvc0_instmem_takedown(dev);
- return ret;
-}
-
-void
-nvc0_instmem_takedown(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
- struct nouveau_vm *vm = NULL;
-
- nvc0_instmem_suspend(dev);
-
- nv_wr32(dev, 0x1704, 0x00000000);
- nv_wr32(dev, 0x1714, 0x00000000);
-
- nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);
-
- nvc0_channel_del(&priv->bar1);
- nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
- nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
-
- nvc0_channel_del(&priv->bar3);
- nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
- nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
- nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
- nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
- nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
-
- dev_priv->engine.instmem.priv = NULL;
- kfree(priv);
-}
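Note: as nvc0_channel_new() above shows, the only state written into a BAR channel's instance block is the page directory address at 0x200/0x204 and the VM limit (vm_size - 1) at 0x208/0x20c. A minimal standalone sketch of that header, with illustrative names only:

#include <stdint.h>

/* Illustrative view of the words nvc0_channel_new() writes at offsets
 * 0x200..0x20c of a channel's instance block. */
struct nvc0_chan_header {
	uint32_t pgd_lo;	/* 0x200: page directory address, low 32 bits */
	uint32_t pgd_hi;	/* 0x204: page directory address, high 32 bits */
	uint32_t limit_lo;	/* 0x208: (vm_size - 1), low 32 bits */
	uint32_t limit_hi;	/* 0x20c: (vm_size - 1), high 32 bits */
};

/* Fill the header for a given page directory address and VM size. */
void nvc0_chan_header_fill(struct nvc0_chan_header *h,
			   uint64_t pgd_addr, uint64_t vm_size)
{
	h->pgd_lo   = (uint32_t)pgd_addr;
	h->pgd_hi   = (uint32_t)(pgd_addr >> 32);
	h->limit_lo = (uint32_t)(vm_size - 1);
	h->limit_hi = (uint32_t)((vm_size - 1) >> 32);
}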
-
#include "drmP.h"
#include "nouveau_drv.h"
-#include "nouveau_bios.h"
+#include <nouveau_bios.h>
#include "nouveau_pm.h"
static u32 read_div(struct drm_device *, int, u32, u32);
#include "drmP.h"
#include "nouveau_drv.h"
-#include "nouveau_ramht.h"
+#include <core/ramht.h>
#include "nouveau_software.h"
#include "nv50_display.h"
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_vm.h"
-
-void
-nvc0_vm_map_pgt(struct nouveau_gpuobj *pgd, u32 index,
- struct nouveau_gpuobj *pgt[2])
-{
- u32 pde[2] = { 0, 0 };
-
- if (pgt[0])
- pde[1] = 0x00000001 | (pgt[0]->vinst >> 8);
- if (pgt[1])
- pde[0] = 0x00000001 | (pgt[1]->vinst >> 8);
-
- nv_wo32(pgd, (index * 8) + 0, pde[0]);
- nv_wo32(pgd, (index * 8) + 4, pde[1]);
-}
-
-static inline u64
-nvc0_vm_addr(struct nouveau_vma *vma, u64 phys, u32 memtype, u32 target)
-{
- phys >>= 8;
-
- phys |= 0x00000001; /* present */
- if (vma->access & NV_MEM_ACCESS_SYS)
- phys |= 0x00000002;
-
- phys |= ((u64)target << 32);
- phys |= ((u64)memtype << 36);
-
- return phys;
-}
-
-void
-nvc0_vm_map(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
-{
- u32 next = 1 << (vma->node->type - 8);
-
- phys = nvc0_vm_addr(vma, phys, mem->memtype, 0);
- pte <<= 3;
- while (cnt--) {
- nv_wo32(pgt, pte + 0, lower_32_bits(phys));
- nv_wo32(pgt, pte + 4, upper_32_bits(phys));
- phys += next;
- pte += 8;
- }
-}
-
-void
-nvc0_vm_map_sg(struct nouveau_vma *vma, struct nouveau_gpuobj *pgt,
- struct nouveau_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
-{
- u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
-
- pte <<= 3;
- while (cnt--) {
- u64 phys = nvc0_vm_addr(vma, *list++, mem->memtype, target);
- nv_wo32(pgt, pte + 0, lower_32_bits(phys));
- nv_wo32(pgt, pte + 4, upper_32_bits(phys));
- pte += 8;
- }
-}
-
-void
-nvc0_vm_unmap(struct nouveau_gpuobj *pgt, u32 pte, u32 cnt)
-{
- pte <<= 3;
- while (cnt--) {
- nv_wo32(pgt, pte + 0, 0x00000000);
- nv_wo32(pgt, pte + 4, 0x00000000);
- pte += 8;
- }
-}
-
-void
-nvc0_vm_flush(struct nouveau_vm *vm)
-{
- struct drm_nouveau_private *dev_priv = vm->dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct drm_device *dev = vm->dev;
- struct nouveau_vm_pgd *vpgd;
- unsigned long flags;
- u32 engine;
-
- engine = 1;
- if (vm == dev_priv->bar1_vm || vm == dev_priv->bar3_vm)
- engine |= 4;
-
- pinstmem->flush(vm->dev);
-
- spin_lock_irqsave(&dev_priv->vm_lock, flags);
- list_for_each_entry(vpgd, &vm->pgd_list, head) {
- /* looks like maybe a "free flush slots" counter, the
- * faster you write to 0x100cbc the more it decreases
- */
- if (!nv_wait_ne(dev, 0x100c80, 0x00ff0000, 0x00000000)) {
- NV_ERROR(dev, "vm timeout 0: 0x%08x %d\n",
- nv_rd32(dev, 0x100c80), engine);
- }
- nv_wr32(dev, 0x100cb8, vpgd->obj->vinst >> 8);
- nv_wr32(dev, 0x100cbc, 0x80000000 | engine);
- /* wait for flush to be queued? */
- if (!nv_wait(dev, 0x100c80, 0x00008000, 0x00008000)) {
- NV_ERROR(dev, "vm timeout 1: 0x%08x %d\n",
- nv_rd32(dev, 0x100c80), engine);
- }
- }
- spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
-}
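Note: nvc0_vm_addr() above fixes the page-table entry layout: the physical address shifted right by 8, bit 0 as the present flag, bit 1 for system-memory access, the target in bits 32 and up, and the memory type from bit 36. A small standalone worked example of that packing (the 0xfe memtype and the address are arbitrary illustration values):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Same packing as nvc0_vm_addr(): phys >> 8, present in bit 0,
 * optional SYS bit 1, target at bit 32, memtype at bit 36. */
static uint64_t nvc0_pte(uint64_t phys, int sys, uint32_t memtype,
			 uint32_t target)
{
	uint64_t pte = (phys >> 8) | 0x00000001;	/* present */

	if (sys)
		pte |= 0x00000002;			/* system memory */
	pte |= (uint64_t)target << 32;
	pte |= (uint64_t)memtype << 36;
	return pte;
}

int main(void)
{
	/* e.g. a VRAM page at 0x20000000 with memtype 0xfe, target 0 */
	printf("pte = 0x%012" PRIx64 "\n", nvc0_pte(0x20000000, 0, 0xfe, 0));
	/* prints: pte = 0x0fe000200001 */
	return 0;
}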
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-
-/* 0 = unsupported
- * 1 = non-compressed
- * 3 = compressed
- */
-static const u8 types[256] = {
- 1, 1, 3, 3, 3, 3, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
- 0, 1, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 3, 3,
- 3, 3, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 0, 1, 1, 1, 1, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
- 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
- 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
- 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
- 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
-};
-
-bool
-nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
-{
- u8 memtype = (tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK) >> 8;
- return likely((types[memtype] == 1));
-}
-
-int
-nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
- u32 type, struct nouveau_mem **pmem)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_mm *mm = &dev_priv->engine.vram.mm;
- struct nouveau_mm_node *r;
- struct nouveau_mem *mem;
- int ret;
-
- size >>= 12;
- align >>= 12;
- ncmin >>= 12;
-
- mem = kzalloc(sizeof(*mem), GFP_KERNEL);
- if (!mem)
- return -ENOMEM;
-
- INIT_LIST_HEAD(&mem->regions);
- mem->dev = dev_priv->dev;
- mem->memtype = (type & 0xff);
- mem->size = size;
-
- mutex_lock(&mm->mutex);
- do {
- ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
- if (ret) {
- mutex_unlock(&mm->mutex);
- nv50_vram_del(dev, &mem);
- return ret;
- }
-
- list_add_tail(&r->rl_entry, &mem->regions);
- size -= r->length;
- } while (size);
- mutex_unlock(&mm->mutex);
-
- r = list_first_entry(&mem->regions, struct nouveau_mm_node, rl_entry);
- mem->offset = (u64)r->offset << 12;
- *pmem = mem;
- return 0;
-}
-
-int
-nvc0_vram_init(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
- u32 parts = nv_rd32(dev, 0x022438);
- u32 pmask = nv_rd32(dev, 0x022554);
- u32 bsize = nv_rd32(dev, 0x10f20c);
- u32 offset, length;
- bool uniform = true;
- int ret, part;
-
- NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
- NV_DEBUG(dev, "parts 0x%08x mask 0x%08x\n", parts, pmask);
-
- dev_priv->vram_type = nouveau_mem_vbios_type(dev);
- dev_priv->vram_rank_B = !!(nv_rd32(dev, 0x10f200) & 0x00000004);
-
- /* read amount of vram attached to each memory controller */
- for (part = 0; part < parts; part++) {
- if (!(pmask & (1 << part))) {
- u32 psize = nv_rd32(dev, 0x11020c + (part * 0x1000));
- if (psize != bsize) {
- if (psize < bsize)
- bsize = psize;
- uniform = false;
- }
-
- NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
- dev_priv->vram_size += (u64)psize << 20;
- }
- }
-
- /* if all controllers have the same amount attached, there are no holes */
- if (uniform) {
- offset = rsvd_head;
- length = (dev_priv->vram_size >> 12) - rsvd_head - rsvd_tail;
- return nouveau_mm_init(&vram->mm, offset, length, 1);
- }
-
- /* otherwise, address lowest common amount from 0GiB */
- ret = nouveau_mm_init(&vram->mm, rsvd_head, (bsize << 8) * parts, 1);
- if (ret)
- return ret;
-
- /* and the rest starting from (8GiB + common_size) */
- offset = (0x0200000000ULL >> 12) + (bsize << 8);
- length = (dev_priv->vram_size >> 12) - (bsize << 8) - rsvd_tail;
-
- ret = nouveau_mm_init(&vram->mm, offset, length, 0);
- if (ret) {
- nouveau_mm_fini(&vram->mm);
- return ret;
- }
-
- return 0;
-}
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_fifo.h"
-
-#define NVE0_FIFO_ENGINE_NUM 32
-
-static void nve0_fifo_isr(struct drm_device *);
-
-struct nve0_fifo_engine {
- struct nouveau_gpuobj *playlist[2];
- int cur_playlist;
-};
-
-struct nve0_fifo_priv {
- struct nouveau_fifo_priv base;
- struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
- struct {
- struct nouveau_gpuobj *mem;
- struct nouveau_vma bar;
- } user;
- int spoon_nr;
-};
-
-struct nve0_fifo_chan {
- struct nouveau_fifo_chan base;
- u32 engine;
-};
-
-static void
-nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct nve0_fifo_engine *peng = &priv->engine[engine];
- struct nouveau_gpuobj *cur;
- u32 match = (engine << 16) | 0x00000001;
- int ret, i, p;
-
- cur = peng->playlist[peng->cur_playlist];
- if (unlikely(cur == NULL)) {
- ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
- if (ret) {
- NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
- return;
- }
-
- peng->playlist[peng->cur_playlist] = cur;
- }
-
- peng->cur_playlist = !peng->cur_playlist;
-
- for (i = 0, p = 0; i < priv->base.channels; i++) {
- u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
- if (ctrl != match)
- continue;
- nv_wo32(cur, p + 0, i);
- nv_wo32(cur, p + 4, 0x00000000);
- p += 8;
- }
- pinstmem->flush(dev);
-
- nv_wr32(dev, 0x002270, cur->vinst >> 12);
- nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
- if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
- NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
-}
-
-static int
-nve0_fifo_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nve0_fifo_priv *priv = nv_engine(dev, engine);
- struct nve0_fifo_chan *fctx;
- u64 usermem = priv->user.mem->vinst + chan->id * 512;
- u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
- int ret = 0, i;
-
- fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
- if (!fctx)
- return -ENOMEM;
-
- fctx->engine = 0; /* PGRAPH */
-
- /* allocate vram for control regs, map into polling area */
- chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
- priv->user.bar.offset + (chan->id * 512), 512);
- if (!chan->user) {
- ret = -ENOMEM;
- goto error;
- }
-
- for (i = 0; i < 0x100; i += 4)
- nv_wo32(chan->ramin, i, 0x00000000);
- nv_wo32(chan->ramin, 0x08, lower_32_bits(usermem));
- nv_wo32(chan->ramin, 0x0c, upper_32_bits(usermem));
- nv_wo32(chan->ramin, 0x10, 0x0000face);
- nv_wo32(chan->ramin, 0x30, 0xfffff902);
- nv_wo32(chan->ramin, 0x48, lower_32_bits(ib_virt));
- nv_wo32(chan->ramin, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
- upper_32_bits(ib_virt));
- nv_wo32(chan->ramin, 0x84, 0x20400000);
- nv_wo32(chan->ramin, 0x94, 0x30000001);
- nv_wo32(chan->ramin, 0x9c, 0x00000100);
- nv_wo32(chan->ramin, 0xac, 0x0000001f);
- nv_wo32(chan->ramin, 0xe4, 0x00000000);
- nv_wo32(chan->ramin, 0xe8, chan->id);
- nv_wo32(chan->ramin, 0xf8, 0x10003080); /* 0x002310 */
- nv_wo32(chan->ramin, 0xfc, 0x10000010); /* 0x002350 */
- pinstmem->flush(dev);
-
- nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
- (chan->ramin->vinst >> 12));
- nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
- nve0_fifo_playlist_update(dev, fctx->engine);
- nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
-
-error:
- if (ret)
- priv->base.base.context_del(chan, engine);
- return ret;
-}
-
-static void
-nve0_fifo_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nve0_fifo_chan *fctx = chan->engctx[engine];
- struct drm_device *dev = chan->dev;
-
- nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
- nv_wr32(dev, 0x002634, chan->id);
- if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
- NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));
- nve0_fifo_playlist_update(dev, fctx->engine);
- nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);
-
- if (chan->user) {
- iounmap(chan->user);
- chan->user = NULL;
- }
-
- chan->engctx[NVOBJ_ENGINE_FIFO] = NULL;
- kfree(fctx);
-}
-
-static int
-nve0_fifo_init(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nve0_fifo_priv *priv = nv_engine(dev, engine);
- struct nve0_fifo_chan *fctx;
- int i;
-
- /* reset PFIFO, enable all available PSUBFIFO areas */
- nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
- nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
- nv_wr32(dev, 0x000204, 0xffffffff);
-
- priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
- NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);
-
- /* PSUBFIFO[n] */
- for (i = 0; i < priv->spoon_nr; i++) {
- nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
- nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
- nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
- }
-
- nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);
-
- nv_wr32(dev, 0x002a00, 0xffffffff);
- nv_wr32(dev, 0x002100, 0xffffffff);
- nv_wr32(dev, 0x002140, 0xbfffffff);
-
- /* restore PFIFO context table */
- for (i = 0; i < priv->base.channels; i++) {
- struct nouveau_channel *chan = dev_priv->channels.ptr[i];
- if (!chan || !(fctx = chan->engctx[engine]))
- continue;
-
- nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
- (chan->ramin->vinst >> 12));
- nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
- nve0_fifo_playlist_update(dev, fctx->engine);
- nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
- }
-
- return 0;
-}
-
-static int
-nve0_fifo_fini(struct drm_device *dev, int engine, bool suspend)
-{
- struct nve0_fifo_priv *priv = nv_engine(dev, engine);
- int i;
-
- for (i = 0; i < priv->base.channels; i++) {
- if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
- continue;
-
- nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
- nv_wr32(dev, 0x002634, i);
- if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
- NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
- i, nv_rd32(dev, 0x002634));
- return -EBUSY;
- }
- }
-
- nv_wr32(dev, 0x002140, 0x00000000);
- return 0;
-}
-
-struct nouveau_enum nve0_fifo_fault_unit[] = {
- {}
-};
-
-struct nouveau_enum nve0_fifo_fault_reason[] = {
- { 0x00, "PT_NOT_PRESENT" },
- { 0x01, "PT_TOO_SHORT" },
- { 0x02, "PAGE_NOT_PRESENT" },
- { 0x03, "VM_LIMIT_EXCEEDED" },
- { 0x04, "NO_CHANNEL" },
- { 0x05, "PAGE_SYSTEM_ONLY" },
- { 0x06, "PAGE_READ_ONLY" },
- { 0x0a, "COMPRESSED_SYSRAM" },
- { 0x0c, "INVALID_STORAGE_TYPE" },
- {}
-};
-
-struct nouveau_enum nve0_fifo_fault_hubclient[] = {
- {}
-};
-
-struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
- {}
-};
-
-struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
- { 0x00200000, "ILLEGAL_MTHD" },
- { 0x00800000, "EMPTY_SUBC" },
- {}
-};
-
-static void
-nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
-{
- u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));
- u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));
- u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));
- u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));
- u32 client = (stat & 0x00001f00) >> 8;
-
- NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
- (stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
- nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
- printk("] from ");
- nouveau_enum_print(nve0_fifo_fault_unit, unit);
- if (stat & 0x00000040) {
- printk("/");
- nouveau_enum_print(nve0_fifo_fault_hubclient, client);
- } else {
- printk("/GPC%d/", (stat & 0x1f000000) >> 24);
- nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
- }
- printk(" on channel 0x%010llx\n", (u64)inst << 12);
-}
-
-static int
-nve0_fifo_page_flip(struct drm_device *dev, u32 chid)
-{
- struct nve0_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
- unsigned long flags;
- int ret = -EINVAL;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- if (likely(chid >= 0 && chid < priv->base.channels)) {
- chan = dev_priv->channels.ptr[chid];
- if (likely(chan))
- ret = nouveau_finish_page_flip(chan, NULL);
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return ret;
-}
-
-static void
-nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
-{
- u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
- u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
- u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
- u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
- u32 subc = (addr & 0x00070000) >> 16;
- u32 mthd = (addr & 0x00003ffc);
- u32 show = stat;
-
- if (stat & 0x00200000) {
- if (mthd == 0x0054) {
- if (!nve0_fifo_page_flip(dev, chid))
- show &= ~0x00200000;
- }
- }
-
- if (show) {
- NV_INFO(dev, "PFIFO%d:", unit);
- nouveau_bitfield_print(nve0_fifo_subfifo_intr, show);
- NV_INFO(dev, "PFIFO%d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
- unit, chid, subc, mthd, data);
- }
-
- nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
- nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
-}
-
-static void
-nve0_fifo_isr(struct drm_device *dev)
-{
- u32 mask = nv_rd32(dev, 0x002140);
- u32 stat = nv_rd32(dev, 0x002100) & mask;
-
- if (stat & 0x00000100) {
- NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
- nv_wr32(dev, 0x002100, 0x00000100);
- stat &= ~0x00000100;
- }
-
- if (stat & 0x10000000) {
- u32 units = nv_rd32(dev, 0x00259c);
- u32 u = units;
-
- while (u) {
- int i = ffs(u) - 1;
- nve0_fifo_isr_vm_fault(dev, i);
- u &= ~(1 << i);
- }
-
- nv_wr32(dev, 0x00259c, units);
- stat &= ~0x10000000;
- }
-
- if (stat & 0x20000000) {
- u32 units = nv_rd32(dev, 0x0025a0);
- u32 u = units;
-
- while (u) {
- int i = ffs(u) - 1;
- nve0_fifo_isr_subfifo_intr(dev, i);
- u &= ~(1 << i);
- }
-
- nv_wr32(dev, 0x0025a0, units);
- stat &= ~0x20000000;
- }
-
- if (stat & 0x40000000) {
- NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
- nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
- stat &= ~0x40000000;
- }
-
- if (stat) {
- NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
- nv_wr32(dev, 0x002100, stat);
- nv_wr32(dev, 0x002140, 0);
- }
-}
-
-static void
-nve0_fifo_destroy(struct drm_device *dev, int engine)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nve0_fifo_priv *priv = nv_engine(dev, engine);
- int i;
-
- nouveau_vm_put(&priv->user.bar);
- nouveau_gpuobj_ref(NULL, &priv->user.mem);
-
- for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
- nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
- nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
- }
-
- dev_priv->eng[engine] = NULL;
- kfree(priv);
-}
-
-int
-nve0_fifo_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nve0_fifo_priv *priv;
- int ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.base.destroy = nve0_fifo_destroy;
- priv->base.base.init = nve0_fifo_init;
- priv->base.base.fini = nve0_fifo_fini;
- priv->base.base.context_new = nve0_fifo_context_new;
- priv->base.base.context_del = nve0_fifo_context_del;
- priv->base.channels = 4096;
- dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
-
- ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 512, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
- if (ret)
- goto error;
-
- ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
- 12, NV_MEM_ACCESS_RW, &priv->user.bar);
- if (ret)
- goto error;
-
- nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);
-
- nouveau_irq_register(dev, 8, nve0_fifo_isr);
-error:
- if (ret)
- priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
- return ret;
-}
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include <linux/firmware.h>
-#include <linux/module.h>
-
-#include "drmP.h"
-
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nouveau_fifo.h"
-
-#include "nve0_graph.h"
-
-static void
-nve0_graph_ctxctl_debug_unit(struct drm_device *dev, u32 base)
-{
- NV_INFO(dev, "PGRAPH: %06x - done 0x%08x\n", base,
- nv_rd32(dev, base + 0x400));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x800), nv_rd32(dev, base + 0x804),
- nv_rd32(dev, base + 0x808), nv_rd32(dev, base + 0x80c));
- NV_INFO(dev, "PGRAPH: %06x - stat 0x%08x 0x%08x 0x%08x 0x%08x\n", base,
- nv_rd32(dev, base + 0x810), nv_rd32(dev, base + 0x814),
- nv_rd32(dev, base + 0x818), nv_rd32(dev, base + 0x81c));
-}
-
-static void
-nve0_graph_ctxctl_debug(struct drm_device *dev)
-{
- u32 gpcnr = nv_rd32(dev, 0x409604) & 0xffff;
- u32 gpc;
-
- nve0_graph_ctxctl_debug_unit(dev, 0x409000);
- for (gpc = 0; gpc < gpcnr; gpc++)
- nve0_graph_ctxctl_debug_unit(dev, 0x502000 + (gpc * 0x8000));
-}
-
-static int
-nve0_graph_load_context(struct nouveau_channel *chan)
-{
- struct drm_device *dev = chan->dev;
-
- nv_wr32(dev, 0x409840, 0x00000030);
- nv_wr32(dev, 0x409500, 0x80000000 | chan->ramin->vinst >> 12);
- nv_wr32(dev, 0x409504, 0x00000003);
- if (!nv_wait(dev, 0x409800, 0x00000010, 0x00000010))
- NV_ERROR(dev, "PGRAPH: load_ctx timeout\n");
-
- return 0;
-}
-
-static int
-nve0_graph_unload_context_to(struct drm_device *dev, u64 chan)
-{
- nv_wr32(dev, 0x409840, 0x00000003);
- nv_wr32(dev, 0x409500, 0x80000000 | chan >> 12);
- nv_wr32(dev, 0x409504, 0x00000009);
- if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000000)) {
- NV_ERROR(dev, "PGRAPH: unload_ctx timeout\n");
- return -EBUSY;
- }
-
- return 0;
-}
-
-static int
-nve0_graph_construct_context(struct nouveau_channel *chan)
-{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
- struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- int ret, i;
- u32 *ctx;
-
- ctx = kmalloc(priv->grctx_size, GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
-
- nve0_graph_load_context(chan);
-
- nv_wo32(grch->grctx, 0x1c, 1);
- nv_wo32(grch->grctx, 0x20, 0);
- nv_wo32(grch->grctx, 0x28, 0);
- nv_wo32(grch->grctx, 0x2c, 0);
- dev_priv->engine.instmem.flush(dev);
-
- ret = nve0_grctx_generate(chan);
- if (ret)
- goto err;
-
- ret = nve0_graph_unload_context_to(dev, chan->ramin->vinst);
- if (ret)
- goto err;
-
- for (i = 0; i < priv->grctx_size; i += 4)
- ctx[i / 4] = nv_ro32(grch->grctx, i);
-
- priv->grctx_vals = ctx;
- return 0;
-
-err:
- kfree(ctx);
- return ret;
-}
-
-static int
-nve0_graph_create_context_mmio_list(struct nouveau_channel *chan)
-{
- struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
- struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- u32 magic[GPC_MAX][2];
- u16 offset = 0x0000;
- int gpc;
- int ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x3000, 256, NVOBJ_FLAG_VM,
- &grch->unk408004);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x8000, 256, NVOBJ_FLAG_VM,
- &grch->unk40800c);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 384 * 1024, 4096,
- NVOBJ_FLAG_VM | NVOBJ_FLAG_VM_USER,
- &grch->unk418810);
- if (ret)
- return ret;
-
- ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, NVOBJ_FLAG_VM,
- &grch->mmio);
- if (ret)
- return ret;
-
-#define mmio(r,v) do { \
- nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 0, (r)); \
- nv_wo32(grch->mmio, (grch->mmio_nr * 8) + 4, (v)); \
- grch->mmio_nr++; \
-} while (0)
- mmio(0x40800c, grch->unk40800c->linst >> 8);
- mmio(0x408010, 0x80000000);
- mmio(0x419004, grch->unk40800c->linst >> 8);
- mmio(0x419008, 0x00000000);
- mmio(0x4064cc, 0x80000000);
- mmio(0x408004, grch->unk408004->linst >> 8);
- mmio(0x408008, 0x80000030);
- mmio(0x418808, grch->unk408004->linst >> 8);
- mmio(0x41880c, 0x80000030);
- mmio(0x4064c8, 0x01800600);
- mmio(0x418810, 0x80000000 | grch->unk418810->linst >> 12);
- mmio(0x419848, 0x10000000 | grch->unk418810->linst >> 12);
- mmio(0x405830, 0x02180648);
- mmio(0x4064c4, 0x0192ffff);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- u16 magic0 = 0x0218 * priv->tpc_nr[gpc];
- u16 magic1 = 0x0648 * priv->tpc_nr[gpc];
- magic[gpc][0] = 0x10000000 | (magic0 << 16) | offset;
- magic[gpc][1] = 0x00000000 | (magic1 << 16);
- offset += 0x0324 * priv->tpc_nr[gpc];
- }
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- mmio(GPC_UNIT(gpc, 0x30c0), magic[gpc][0]);
- mmio(GPC_UNIT(gpc, 0x30e4), magic[gpc][1] | offset);
- offset += 0x07ff * priv->tpc_nr[gpc];
- }
-
- mmio(0x17e91c, 0x06060609);
- mmio(0x17e920, 0x00090a05);
-#undef mmio
- return 0;
-}
-
-static int
-nve0_graph_context_new(struct nouveau_channel *chan, int engine)
-{
- struct drm_device *dev = chan->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
- struct nve0_graph_priv *priv = nv_engine(dev, engine);
- struct nve0_graph_chan *grch;
- struct nouveau_gpuobj *grctx;
- int ret, i;
-
- grch = kzalloc(sizeof(*grch), GFP_KERNEL);
- if (!grch)
- return -ENOMEM;
- chan->engctx[NVOBJ_ENGINE_GR] = grch;
-
- ret = nouveau_gpuobj_new(dev, chan, priv->grctx_size, 256,
- NVOBJ_FLAG_VM | NVOBJ_FLAG_ZERO_ALLOC,
- &grch->grctx);
- if (ret)
- goto error;
- grctx = grch->grctx;
-
- ret = nve0_graph_create_context_mmio_list(chan);
- if (ret)
- goto error;
-
- nv_wo32(chan->ramin, 0x0210, lower_32_bits(grctx->linst) | 4);
- nv_wo32(chan->ramin, 0x0214, upper_32_bits(grctx->linst));
- pinstmem->flush(dev);
-
- if (!priv->grctx_vals) {
- ret = nve0_graph_construct_context(chan);
- if (ret)
- goto error;
- }
-
- for (i = 0; i < priv->grctx_size; i += 4)
- nv_wo32(grctx, i, priv->grctx_vals[i / 4]);
- nv_wo32(grctx, 0xf4, 0);
- nv_wo32(grctx, 0xf8, 0);
- nv_wo32(grctx, 0x10, grch->mmio_nr);
- nv_wo32(grctx, 0x14, lower_32_bits(grch->mmio->linst));
- nv_wo32(grctx, 0x18, upper_32_bits(grch->mmio->linst));
- nv_wo32(grctx, 0x1c, 1);
- nv_wo32(grctx, 0x20, 0);
- nv_wo32(grctx, 0x28, 0);
- nv_wo32(grctx, 0x2c, 0);
-
- pinstmem->flush(dev);
- return 0;
-
-error:
- priv->base.context_del(chan, engine);
- return ret;
-}
-
-static void
-nve0_graph_context_del(struct nouveau_channel *chan, int engine)
-{
- struct nve0_graph_chan *grch = chan->engctx[engine];
-
- nouveau_gpuobj_ref(NULL, &grch->mmio);
- nouveau_gpuobj_ref(NULL, &grch->unk418810);
- nouveau_gpuobj_ref(NULL, &grch->unk40800c);
- nouveau_gpuobj_ref(NULL, &grch->unk408004);
- nouveau_gpuobj_ref(NULL, &grch->grctx);
- chan->engctx[engine] = NULL;
-}
-
-static int
-nve0_graph_object_new(struct nouveau_channel *chan, int engine,
- u32 handle, u16 class)
-{
- return 0;
-}
-
-static int
-nve0_graph_fini(struct drm_device *dev, int engine, bool suspend)
-{
- return 0;
-}
-
-static void
-nve0_graph_init_obj418880(struct drm_device *dev)
-{
- struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- int i;
-
- nv_wr32(dev, GPC_BCAST(0x0880), 0x00000000);
- nv_wr32(dev, GPC_BCAST(0x08a4), 0x00000000);
- for (i = 0; i < 4; i++)
- nv_wr32(dev, GPC_BCAST(0x0888) + (i * 4), 0x00000000);
- nv_wr32(dev, GPC_BCAST(0x08b4), priv->unk4188b4->vinst >> 8);
- nv_wr32(dev, GPC_BCAST(0x08b8), priv->unk4188b8->vinst >> 8);
-}
-
-static void
-nve0_graph_init_regs(struct drm_device *dev)
-{
- nv_wr32(dev, 0x400080, 0x003083c2);
- nv_wr32(dev, 0x400088, 0x0001ffe7);
- nv_wr32(dev, 0x40008c, 0x00000000);
- nv_wr32(dev, 0x400090, 0x00000030);
- nv_wr32(dev, 0x40013c, 0x003901f7);
- nv_wr32(dev, 0x400140, 0x00000100);
- nv_wr32(dev, 0x400144, 0x00000000);
- nv_wr32(dev, 0x400148, 0x00000110);
- nv_wr32(dev, 0x400138, 0x00000000);
- nv_wr32(dev, 0x400130, 0x00000000);
- nv_wr32(dev, 0x400134, 0x00000000);
- nv_wr32(dev, 0x400124, 0x00000002);
-}
-
-static void
-nve0_graph_init_units(struct drm_device *dev)
-{
- nv_wr32(dev, 0x409ffc, 0x00000000);
- nv_wr32(dev, 0x409c14, 0x00003e3e);
- nv_wr32(dev, 0x409c24, 0x000f0000);
-
- nv_wr32(dev, 0x404000, 0xc0000000);
- nv_wr32(dev, 0x404600, 0xc0000000);
- nv_wr32(dev, 0x408030, 0xc0000000);
- nv_wr32(dev, 0x404490, 0xc0000000);
- nv_wr32(dev, 0x406018, 0xc0000000);
- nv_wr32(dev, 0x407020, 0xc0000000);
- nv_wr32(dev, 0x405840, 0xc0000000);
- nv_wr32(dev, 0x405844, 0x00ffffff);
-
- nv_mask(dev, 0x419cc0, 0x00000008, 0x00000008);
- nv_mask(dev, 0x419eb4, 0x00001000, 0x00001000);
-
-}
-
-static void
-nve0_graph_init_gpc_0(struct drm_device *dev)
-{
- struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, priv->tpc_total);
- u32 data[TPC_MAX / 8];
- u8 tpcnr[GPC_MAX];
- int i, gpc, tpc;
-
- nv_wr32(dev, GPC_UNIT(0, 0x3018), 0x00000001);
-
- memset(data, 0x00, sizeof(data));
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
- for (i = 0, gpc = -1; i < priv->tpc_total; i++) {
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpcnr[gpc]);
- tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
-
- data[i / 8] |= tpc << ((i % 8) * 4);
- }
-
- nv_wr32(dev, GPC_BCAST(0x0980), data[0]);
- nv_wr32(dev, GPC_BCAST(0x0984), data[1]);
- nv_wr32(dev, GPC_BCAST(0x0988), data[2]);
- nv_wr32(dev, GPC_BCAST(0x098c), data[3]);
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(dev, GPC_UNIT(gpc, 0x0914), priv->magic_not_rop_nr << 8 |
- priv->tpc_nr[gpc]);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0910), 0x00040000 | priv->tpc_total);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0918), magicgpc918);
- }
-
- nv_wr32(dev, GPC_BCAST(0x1bd4), magicgpc918);
- nv_wr32(dev, GPC_BCAST(0x08ac), nv_rd32(dev, 0x100800));
-}
-
-static void
-nve0_graph_init_gpc_1(struct drm_device *dev)
-{
- struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- int gpc, tpc;
-
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- nv_wr32(dev, GPC_UNIT(gpc, 0x3038), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0420), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0900), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x1028), 0xc0000000);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0824), 0xc0000000);
- for (tpc = 0; tpc < priv->tpc_nr[gpc]; tpc++) {
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x644), 0x001ffffe);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x64c), 0x0000000f);
- }
- nv_wr32(dev, GPC_UNIT(gpc, 0x2c90), 0xffffffff);
- nv_wr32(dev, GPC_UNIT(gpc, 0x2c94), 0xffffffff);
- }
-}
-
-static void
-nve0_graph_init_rop(struct drm_device *dev)
-{
- struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- int rop;
-
- for (rop = 0; rop < priv->rop_nr; rop++) {
- nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x204), 0xffffffff);
- nv_wr32(dev, ROP_UNIT(rop, 0x208), 0xffffffff);
- }
-}
-
-static void
-nve0_graph_init_fuc(struct drm_device *dev, u32 fuc_base,
- struct nve0_graph_fuc *code, struct nve0_graph_fuc *data)
-{
- int i;
-
- nv_wr32(dev, fuc_base + 0x01c0, 0x01000000);
- for (i = 0; i < data->size / 4; i++)
- nv_wr32(dev, fuc_base + 0x01c4, data->data[i]);
-
- nv_wr32(dev, fuc_base + 0x0180, 0x01000000);
- for (i = 0; i < code->size / 4; i++) {
- if ((i & 0x3f) == 0)
- nv_wr32(dev, fuc_base + 0x0188, i >> 6);
- nv_wr32(dev, fuc_base + 0x0184, code->data[i]);
- }
-}
-
-static int
-nve0_graph_init_ctxctl(struct drm_device *dev)
-{
- struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- u32 r000260;
-
- /* load fuc microcode */
- r000260 = nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
- nve0_graph_init_fuc(dev, 0x409000, &priv->fuc409c, &priv->fuc409d);
- nve0_graph_init_fuc(dev, 0x41a000, &priv->fuc41ac, &priv->fuc41ad);
- nv_wr32(dev, 0x000260, r000260);
-
- /* start both of them running */
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x41a10c, 0x00000000);
- nv_wr32(dev, 0x40910c, 0x00000000);
- nv_wr32(dev, 0x41a100, 0x00000002);
- nv_wr32(dev, 0x409100, 0x00000002);
- if (!nv_wait(dev, 0x409800, 0x00000001, 0x00000001))
- NV_INFO(dev, "0x409800 wait failed\n");
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x7fffffff);
- nv_wr32(dev, 0x409504, 0x00000021);
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000010);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x10 timeout\n");
- return -EBUSY;
- }
- priv->grctx_size = nv_rd32(dev, 0x409800);
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000016);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x16 timeout\n");
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x409840, 0xffffffff);
- nv_wr32(dev, 0x409500, 0x00000000);
- nv_wr32(dev, 0x409504, 0x00000025);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x25 timeout\n");
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x409800, 0x00000000);
- nv_wr32(dev, 0x409500, 0x00000001);
- nv_wr32(dev, 0x409504, 0x00000030);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x30 timeout\n");
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x409810, 0xb00095c8);
- nv_wr32(dev, 0x409800, 0x00000000);
- nv_wr32(dev, 0x409500, 0x00000001);
- nv_wr32(dev, 0x409504, 0x00000031);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x31 timeout\n");
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x409810, 0x00080420);
- nv_wr32(dev, 0x409800, 0x00000000);
- nv_wr32(dev, 0x409500, 0x00000001);
- nv_wr32(dev, 0x409504, 0x00000032);
- if (!nv_wait_ne(dev, 0x409800, 0xffffffff, 0x00000000)) {
- NV_ERROR(dev, "fuc09 req 0x32 timeout\n");
- return -EBUSY;
- }
-
- nv_wr32(dev, 0x409614, 0x00000070);
- nv_wr32(dev, 0x409614, 0x00000770);
- nv_wr32(dev, 0x40802c, 0x00000001);
- return 0;
-}
-
-static int
-nve0_graph_init(struct drm_device *dev, int engine)
-{
- int ret;
-
- nv_mask(dev, 0x000200, 0x18001000, 0x00000000);
- nv_mask(dev, 0x000200, 0x18001000, 0x18001000);
-
- nve0_graph_init_obj418880(dev);
- nve0_graph_init_regs(dev);
- nve0_graph_init_gpc_0(dev);
-
- nv_wr32(dev, 0x400500, 0x00010001);
- nv_wr32(dev, 0x400100, 0xffffffff);
- nv_wr32(dev, 0x40013c, 0xffffffff);
-
- nve0_graph_init_units(dev);
- nve0_graph_init_gpc_1(dev);
- nve0_graph_init_rop(dev);
-
- nv_wr32(dev, 0x400108, 0xffffffff);
- nv_wr32(dev, 0x400138, 0xffffffff);
- nv_wr32(dev, 0x400118, 0xffffffff);
- nv_wr32(dev, 0x400130, 0xffffffff);
- nv_wr32(dev, 0x40011c, 0xffffffff);
- nv_wr32(dev, 0x400134, 0xffffffff);
- nv_wr32(dev, 0x400054, 0x34ce3464);
-
- ret = nve0_graph_init_ctxctl(dev);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int
-nve0_graph_isr_chid(struct drm_device *dev, u64 inst)
-{
- struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&dev_priv->channels.lock, flags);
- for (i = 0; i < pfifo->channels; i++) {
- chan = dev_priv->channels.ptr[i];
- if (!chan || !chan->ramin)
- continue;
-
- if (inst == chan->ramin->vinst)
- break;
- }
- spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
- return i;
-}
-
-static void
-nve0_graph_ctxctl_isr(struct drm_device *dev)
-{
- u32 ustat = nv_rd32(dev, 0x409c18);
-
- if (ustat & 0x00000001)
- NV_INFO(dev, "PGRAPH: CTXCTRL ucode error\n");
- if (ustat & 0x00080000)
- NV_INFO(dev, "PGRAPH: CTXCTRL watchdog timeout\n");
- if (ustat & ~0x00080001)
- NV_INFO(dev, "PGRAPH: CTXCTRL 0x%08x\n", ustat);
-
- nve0_graph_ctxctl_debug(dev);
- nv_wr32(dev, 0x409c20, ustat);
-}
-
-static void
-nve0_graph_trap_isr(struct drm_device *dev, int chid)
-{
- struct nve0_graph_priv *priv = nv_engine(dev, NVOBJ_ENGINE_GR);
- u32 trap = nv_rd32(dev, 0x400108);
- int rop;
-
- if (trap & 0x00000001) {
- u32 stat = nv_rd32(dev, 0x404000);
- NV_INFO(dev, "PGRAPH: DISPATCH ch %d 0x%08x\n", chid, stat);
- nv_wr32(dev, 0x404000, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x00000001);
- trap &= ~0x00000001;
- }
-
- if (trap & 0x00000010) {
- u32 stat = nv_rd32(dev, 0x405840);
- NV_INFO(dev, "PGRAPH: SHADER ch %d 0x%08x\n", chid, stat);
- nv_wr32(dev, 0x405840, 0xc0000000);
- nv_wr32(dev, 0x400108, 0x00000010);
- trap &= ~0x00000010;
- }
-
- if (trap & 0x02000000) {
- for (rop = 0; rop < priv->rop_nr; rop++) {
- u32 statz = nv_rd32(dev, ROP_UNIT(rop, 0x070));
- u32 statc = nv_rd32(dev, ROP_UNIT(rop, 0x144));
- NV_INFO(dev, "PGRAPH: ROP%d ch %d 0x%08x 0x%08x\n",
- rop, chid, statz, statc);
- nv_wr32(dev, ROP_UNIT(rop, 0x070), 0xc0000000);
- nv_wr32(dev, ROP_UNIT(rop, 0x144), 0xc0000000);
- }
- nv_wr32(dev, 0x400108, 0x02000000);
- trap &= ~0x02000000;
- }
-
- if (trap) {
- NV_INFO(dev, "PGRAPH: TRAP ch %d 0x%08x\n", chid, trap);
- nv_wr32(dev, 0x400108, trap);
- }
-}
-
-static void
-nve0_graph_isr(struct drm_device *dev)
-{
- u64 inst = (u64)(nv_rd32(dev, 0x409b00) & 0x0fffffff) << 12;
- u32 chid = nve0_graph_isr_chid(dev, inst);
- u32 stat = nv_rd32(dev, 0x400100);
- u32 addr = nv_rd32(dev, 0x400704);
- u32 mthd = (addr & 0x00003ffc);
- u32 subc = (addr & 0x00070000) >> 16;
- u32 data = nv_rd32(dev, 0x400708);
- u32 code = nv_rd32(dev, 0x400110);
- u32 class = nv_rd32(dev, 0x404200 + (subc * 4));
-
- if (stat & 0x00000010) {
- if (nouveau_gpuobj_mthd_call2(dev, chid, class, mthd, data)) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_MTHD ch %d [0x%010llx] "
- "subc %d class 0x%04x mthd 0x%04x "
- "data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- }
- nv_wr32(dev, 0x400100, 0x00000010);
- stat &= ~0x00000010;
- }
-
- if (stat & 0x00000020) {
- NV_INFO(dev, "PGRAPH: ILLEGAL_CLASS ch %d [0x%010llx] subc %d "
- "class 0x%04x mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv_wr32(dev, 0x400100, 0x00000020);
- stat &= ~0x00000020;
- }
-
- if (stat & 0x00100000) {
- NV_INFO(dev, "PGRAPH: DATA_ERROR [");
- nouveau_enum_print(nv50_data_error_names, code);
- printk("] ch %d [0x%010llx] subc %d class 0x%04x "
- "mthd 0x%04x data 0x%08x\n",
- chid, inst, subc, class, mthd, data);
- nv_wr32(dev, 0x400100, 0x00100000);
- stat &= ~0x00100000;
- }
-
- if (stat & 0x00200000) {
- nve0_graph_trap_isr(dev, chid);
- nv_wr32(dev, 0x400100, 0x00200000);
- stat &= ~0x00200000;
- }
-
- if (stat & 0x00080000) {
- nve0_graph_ctxctl_isr(dev);
- nv_wr32(dev, 0x400100, 0x00080000);
- stat &= ~0x00080000;
- }
-
- if (stat) {
- NV_INFO(dev, "PGRAPH: unknown stat 0x%08x\n", stat);
- nv_wr32(dev, 0x400100, stat);
- }
-
- nv_wr32(dev, 0x400500, 0x00010001);
-}
-
-static int
-nve0_graph_create_fw(struct drm_device *dev, const char *fwname,
- struct nve0_graph_fuc *fuc)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- const struct firmware *fw;
- char f[32];
- int ret;
-
- snprintf(f, sizeof(f), "nouveau/nv%02x_%s", dev_priv->chipset, fwname);
- ret = request_firmware(&fw, f, &dev->pdev->dev);
- if (ret)
- return ret;
-
- fuc->size = fw->size;
- fuc->data = kmemdup(fw->data, fuc->size, GFP_KERNEL);
- release_firmware(fw);
- return (fuc->data != NULL) ? 0 : -ENOMEM;
-}
-
-static void
-nve0_graph_destroy_fw(struct nve0_graph_fuc *fuc)
-{
- if (fuc->data) {
- kfree(fuc->data);
- fuc->data = NULL;
- }
-}
-
-static void
-nve0_graph_destroy(struct drm_device *dev, int engine)
-{
- struct nve0_graph_priv *priv = nv_engine(dev, engine);
-
- nve0_graph_destroy_fw(&priv->fuc409c);
- nve0_graph_destroy_fw(&priv->fuc409d);
- nve0_graph_destroy_fw(&priv->fuc41ac);
- nve0_graph_destroy_fw(&priv->fuc41ad);
-
- nouveau_irq_unregister(dev, 12);
-
- nouveau_gpuobj_ref(NULL, &priv->unk4188b8);
- nouveau_gpuobj_ref(NULL, &priv->unk4188b4);
-
- if (priv->grctx_vals)
- kfree(priv->grctx_vals);
-
- NVOBJ_ENGINE_DEL(dev, GR);
- kfree(priv);
-}
-
-int
-nve0_graph_create(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nve0_graph_priv *priv;
- int ret, gpc, i;
- u32 kepler;
-
- kepler = nve0_graph_class(dev);
- if (!kepler) {
- NV_ERROR(dev, "PGRAPH: unsupported chipset, please report!\n");
- return 0;
- }
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->base.destroy = nve0_graph_destroy;
- priv->base.init = nve0_graph_init;
- priv->base.fini = nve0_graph_fini;
- priv->base.context_new = nve0_graph_context_new;
- priv->base.context_del = nve0_graph_context_del;
- priv->base.object_new = nve0_graph_object_new;
-
- NVOBJ_ENGINE_ADD(dev, GR, &priv->base);
- nouveau_irq_register(dev, 12, nve0_graph_isr);
-
- NV_INFO(dev, "PGRAPH: using external firmware\n");
- if (nve0_graph_create_fw(dev, "fuc409c", &priv->fuc409c) ||
- nve0_graph_create_fw(dev, "fuc409d", &priv->fuc409d) ||
- nve0_graph_create_fw(dev, "fuc41ac", &priv->fuc41ac) ||
- nve0_graph_create_fw(dev, "fuc41ad", &priv->fuc41ad)) {
- ret = 0;
- goto error;
- }
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b4);
- if (ret)
- goto error;
-
- ret = nouveau_gpuobj_new(dev, NULL, 0x1000, 256, 0, &priv->unk4188b8);
- if (ret)
- goto error;
-
- for (i = 0; i < 0x1000; i += 4) {
- nv_wo32(priv->unk4188b4, i, 0x00000010);
- nv_wo32(priv->unk4188b8, i, 0x00000010);
- }
-
- priv->gpc_nr = nv_rd32(dev, 0x409604) & 0x0000001f;
- priv->rop_nr = (nv_rd32(dev, 0x409604) & 0x001f0000) >> 16;
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- priv->tpc_nr[gpc] = nv_rd32(dev, GPC_UNIT(gpc, 0x2608));
- priv->tpc_total += priv->tpc_nr[gpc];
- }
-
- switch (dev_priv->chipset) {
- case 0xe4:
- if (priv->tpc_total == 8)
- priv->magic_not_rop_nr = 3;
- else
- if (priv->tpc_total == 7)
- priv->magic_not_rop_nr = 1;
- break;
- case 0xe7:
- priv->magic_not_rop_nr = 1;
- break;
- default:
- break;
- }
-
- if (!priv->magic_not_rop_nr) {
- NV_ERROR(dev, "PGRAPH: unknown config: %d/%d/%d/%d, %d\n",
- priv->tpc_nr[0], priv->tpc_nr[1], priv->tpc_nr[2],
- priv->tpc_nr[3], priv->rop_nr);
- priv->magic_not_rop_nr = 0x00;
- }
-
- NVOBJ_CLASS(dev, 0xa097, GR); /* subc 0: 3D */
- NVOBJ_CLASS(dev, 0xa0c0, GR); /* subc 1: COMPUTE */
- NVOBJ_CLASS(dev, 0xa040, GR); /* subc 2: P2MF */
- NVOBJ_CLASS(dev, 0x902d, GR); /* subc 3: 2D */
- NVOBJ_CLASS(dev, 0xa0b5, GR); /* subc 4: COPY */
- return 0;
-
-error:
- nve0_graph_destroy(dev, NVOBJ_ENGINE_GR);
- return ret;
-}
+++ /dev/null
-/*
- * Copyright 2012 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#ifndef __NVE0_GRAPH_H__
-#define __NVE0_GRAPH_H__
-
-#define GPC_MAX 4
-#define TPC_MAX 32
-
-#define ROP_BCAST(r) (0x408800 + (r))
-#define ROP_UNIT(u, r) (0x410000 + (u) * 0x400 + (r))
-#define GPC_BCAST(r) (0x418000 + (r))
-#define GPC_UNIT(t, r) (0x500000 + (t) * 0x8000 + (r))
-#define TPC_UNIT(t, m, r) (0x504000 + (t) * 0x8000 + (m) * 0x800 + (r))
-
-struct nve0_graph_fuc {
- u32 *data;
- u32 size;
-};
-
-struct nve0_graph_priv {
- struct nouveau_exec_engine base;
-
- struct nve0_graph_fuc fuc409c;
- struct nve0_graph_fuc fuc409d;
- struct nve0_graph_fuc fuc41ac;
- struct nve0_graph_fuc fuc41ad;
-
- u8 gpc_nr;
- u8 rop_nr;
- u8 tpc_nr[GPC_MAX];
- u8 tpc_total;
-
- u32 grctx_size;
- u32 *grctx_vals;
- struct nouveau_gpuobj *unk4188b4;
- struct nouveau_gpuobj *unk4188b8;
-
- u8 magic_not_rop_nr;
-};
-
-struct nve0_graph_chan {
- struct nouveau_gpuobj *grctx;
- struct nouveau_gpuobj *unk408004; /* 0x418810 too */
- struct nouveau_gpuobj *unk40800c; /* 0x419004 too */
- struct nouveau_gpuobj *unk418810; /* 0x419848 too */
- struct nouveau_gpuobj *mmio;
- int mmio_nr;
-};
-
-int nve0_grctx_generate(struct nouveau_channel *);
-
-/* nve0_graph.c uses this also to determine supported chipsets */
-static inline u32
-nve0_graph_class(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
-
- switch (dev_priv->chipset) {
- case 0xe4:
- case 0xe7:
- return 0xa097;
- default:
- return 0;
- }
-}
-
-#endif
+++ /dev/null
-/*
- * Copyright 2010 Red Hat Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: Ben Skeggs
- */
-
-#include "drmP.h"
-#include "nouveau_drv.h"
-#include "nouveau_mm.h"
-#include "nve0_graph.h"
-
-static void
-nv_icmd(struct drm_device *dev, u32 icmd, u32 data)
-{
- nv_wr32(dev, 0x400204, data);
- nv_wr32(dev, 0x400200, icmd);
- while (nv_rd32(dev, 0x400700) & 0x00000002) {}
-}
-
-static void
-nve0_grctx_generate_icmd(struct drm_device *dev)
-{
- nv_wr32(dev, 0x400208, 0x80000000);
- nv_icmd(dev, 0x001000, 0x00000004);
- nv_icmd(dev, 0x000039, 0x00000000);
- nv_icmd(dev, 0x00003a, 0x00000000);
- nv_icmd(dev, 0x00003b, 0x00000000);
- nv_icmd(dev, 0x0000a9, 0x0000ffff);
- nv_icmd(dev, 0x000038, 0x0fac6881);
- nv_icmd(dev, 0x00003d, 0x00000001);
- nv_icmd(dev, 0x0000e8, 0x00000400);
- nv_icmd(dev, 0x0000e9, 0x00000400);
- nv_icmd(dev, 0x0000ea, 0x00000400);
- nv_icmd(dev, 0x0000eb, 0x00000400);
- nv_icmd(dev, 0x0000ec, 0x00000400);
- nv_icmd(dev, 0x0000ed, 0x00000400);
- nv_icmd(dev, 0x0000ee, 0x00000400);
- nv_icmd(dev, 0x0000ef, 0x00000400);
- nv_icmd(dev, 0x000078, 0x00000300);
- nv_icmd(dev, 0x000079, 0x00000300);
- nv_icmd(dev, 0x00007a, 0x00000300);
- nv_icmd(dev, 0x00007b, 0x00000300);
- nv_icmd(dev, 0x00007c, 0x00000300);
- nv_icmd(dev, 0x00007d, 0x00000300);
- nv_icmd(dev, 0x00007e, 0x00000300);
- nv_icmd(dev, 0x00007f, 0x00000300);
- nv_icmd(dev, 0x000050, 0x00000011);
- nv_icmd(dev, 0x000058, 0x00000008);
- nv_icmd(dev, 0x000059, 0x00000008);
- nv_icmd(dev, 0x00005a, 0x00000008);
- nv_icmd(dev, 0x00005b, 0x00000008);
- nv_icmd(dev, 0x00005c, 0x00000008);
- nv_icmd(dev, 0x00005d, 0x00000008);
- nv_icmd(dev, 0x00005e, 0x00000008);
- nv_icmd(dev, 0x00005f, 0x00000008);
- nv_icmd(dev, 0x000208, 0x00000001);
- nv_icmd(dev, 0x000209, 0x00000001);
- nv_icmd(dev, 0x00020a, 0x00000001);
- nv_icmd(dev, 0x00020b, 0x00000001);
- nv_icmd(dev, 0x00020c, 0x00000001);
- nv_icmd(dev, 0x00020d, 0x00000001);
- nv_icmd(dev, 0x00020e, 0x00000001);
- nv_icmd(dev, 0x00020f, 0x00000001);
- nv_icmd(dev, 0x000081, 0x00000001);
- nv_icmd(dev, 0x000085, 0x00000004);
- nv_icmd(dev, 0x000088, 0x00000400);
- nv_icmd(dev, 0x000090, 0x00000300);
- nv_icmd(dev, 0x000098, 0x00001001);
- nv_icmd(dev, 0x0000e3, 0x00000001);
- nv_icmd(dev, 0x0000da, 0x00000001);
- nv_icmd(dev, 0x0000f8, 0x00000003);
- nv_icmd(dev, 0x0000fa, 0x00000001);
- nv_icmd(dev, 0x00009f, 0x0000ffff);
- nv_icmd(dev, 0x0000a0, 0x0000ffff);
- nv_icmd(dev, 0x0000a1, 0x0000ffff);
- nv_icmd(dev, 0x0000a2, 0x0000ffff);
- nv_icmd(dev, 0x0000b1, 0x00000001);
- nv_icmd(dev, 0x0000ad, 0x0000013e);
- nv_icmd(dev, 0x0000e1, 0x00000010);
- nv_icmd(dev, 0x000290, 0x00000000);
- nv_icmd(dev, 0x000291, 0x00000000);
- nv_icmd(dev, 0x000292, 0x00000000);
- nv_icmd(dev, 0x000293, 0x00000000);
- nv_icmd(dev, 0x000294, 0x00000000);
- nv_icmd(dev, 0x000295, 0x00000000);
- nv_icmd(dev, 0x000296, 0x00000000);
- nv_icmd(dev, 0x000297, 0x00000000);
- nv_icmd(dev, 0x000298, 0x00000000);
- nv_icmd(dev, 0x000299, 0x00000000);
- nv_icmd(dev, 0x00029a, 0x00000000);
- nv_icmd(dev, 0x00029b, 0x00000000);
- nv_icmd(dev, 0x00029c, 0x00000000);
- nv_icmd(dev, 0x00029d, 0x00000000);
- nv_icmd(dev, 0x00029e, 0x00000000);
- nv_icmd(dev, 0x00029f, 0x00000000);
- nv_icmd(dev, 0x0003b0, 0x00000000);
- nv_icmd(dev, 0x0003b1, 0x00000000);
- nv_icmd(dev, 0x0003b2, 0x00000000);
- nv_icmd(dev, 0x0003b3, 0x00000000);
- nv_icmd(dev, 0x0003b4, 0x00000000);
- nv_icmd(dev, 0x0003b5, 0x00000000);
- nv_icmd(dev, 0x0003b6, 0x00000000);
- nv_icmd(dev, 0x0003b7, 0x00000000);
- nv_icmd(dev, 0x0003b8, 0x00000000);
- nv_icmd(dev, 0x0003b9, 0x00000000);
- nv_icmd(dev, 0x0003ba, 0x00000000);
- nv_icmd(dev, 0x0003bb, 0x00000000);
- nv_icmd(dev, 0x0003bc, 0x00000000);
- nv_icmd(dev, 0x0003bd, 0x00000000);
- nv_icmd(dev, 0x0003be, 0x00000000);
- nv_icmd(dev, 0x0003bf, 0x00000000);
- nv_icmd(dev, 0x0002a0, 0x00000000);
- nv_icmd(dev, 0x0002a1, 0x00000000);
- nv_icmd(dev, 0x0002a2, 0x00000000);
- nv_icmd(dev, 0x0002a3, 0x00000000);
- nv_icmd(dev, 0x0002a4, 0x00000000);
- nv_icmd(dev, 0x0002a5, 0x00000000);
- nv_icmd(dev, 0x0002a6, 0x00000000);
- nv_icmd(dev, 0x0002a7, 0x00000000);
- nv_icmd(dev, 0x0002a8, 0x00000000);
- nv_icmd(dev, 0x0002a9, 0x00000000);
- nv_icmd(dev, 0x0002aa, 0x00000000);
- nv_icmd(dev, 0x0002ab, 0x00000000);
- nv_icmd(dev, 0x0002ac, 0x00000000);
- nv_icmd(dev, 0x0002ad, 0x00000000);
- nv_icmd(dev, 0x0002ae, 0x00000000);
- nv_icmd(dev, 0x0002af, 0x00000000);
- nv_icmd(dev, 0x000420, 0x00000000);
- nv_icmd(dev, 0x000421, 0x00000000);
- nv_icmd(dev, 0x000422, 0x00000000);
- nv_icmd(dev, 0x000423, 0x00000000);
- nv_icmd(dev, 0x000424, 0x00000000);
- nv_icmd(dev, 0x000425, 0x00000000);
- nv_icmd(dev, 0x000426, 0x00000000);
- nv_icmd(dev, 0x000427, 0x00000000);
- nv_icmd(dev, 0x000428, 0x00000000);
- nv_icmd(dev, 0x000429, 0x00000000);
- nv_icmd(dev, 0x00042a, 0x00000000);
- nv_icmd(dev, 0x00042b, 0x00000000);
- nv_icmd(dev, 0x00042c, 0x00000000);
- nv_icmd(dev, 0x00042d, 0x00000000);
- nv_icmd(dev, 0x00042e, 0x00000000);
- nv_icmd(dev, 0x00042f, 0x00000000);
- nv_icmd(dev, 0x0002b0, 0x00000000);
- nv_icmd(dev, 0x0002b1, 0x00000000);
- nv_icmd(dev, 0x0002b2, 0x00000000);
- nv_icmd(dev, 0x0002b3, 0x00000000);
- nv_icmd(dev, 0x0002b4, 0x00000000);
- nv_icmd(dev, 0x0002b5, 0x00000000);
- nv_icmd(dev, 0x0002b6, 0x00000000);
- nv_icmd(dev, 0x0002b7, 0x00000000);
- nv_icmd(dev, 0x0002b8, 0x00000000);
- nv_icmd(dev, 0x0002b9, 0x00000000);
- nv_icmd(dev, 0x0002ba, 0x00000000);
- nv_icmd(dev, 0x0002bb, 0x00000000);
- nv_icmd(dev, 0x0002bc, 0x00000000);
- nv_icmd(dev, 0x0002bd, 0x00000000);
- nv_icmd(dev, 0x0002be, 0x00000000);
- nv_icmd(dev, 0x0002bf, 0x00000000);
- nv_icmd(dev, 0x000430, 0x00000000);
- nv_icmd(dev, 0x000431, 0x00000000);
- nv_icmd(dev, 0x000432, 0x00000000);
- nv_icmd(dev, 0x000433, 0x00000000);
- nv_icmd(dev, 0x000434, 0x00000000);
- nv_icmd(dev, 0x000435, 0x00000000);
- nv_icmd(dev, 0x000436, 0x00000000);
- nv_icmd(dev, 0x000437, 0x00000000);
- nv_icmd(dev, 0x000438, 0x00000000);
- nv_icmd(dev, 0x000439, 0x00000000);
- nv_icmd(dev, 0x00043a, 0x00000000);
- nv_icmd(dev, 0x00043b, 0x00000000);
- nv_icmd(dev, 0x00043c, 0x00000000);
- nv_icmd(dev, 0x00043d, 0x00000000);
- nv_icmd(dev, 0x00043e, 0x00000000);
- nv_icmd(dev, 0x00043f, 0x00000000);
- nv_icmd(dev, 0x0002c0, 0x00000000);
- nv_icmd(dev, 0x0002c1, 0x00000000);
- nv_icmd(dev, 0x0002c2, 0x00000000);
- nv_icmd(dev, 0x0002c3, 0x00000000);
- nv_icmd(dev, 0x0002c4, 0x00000000);
- nv_icmd(dev, 0x0002c5, 0x00000000);
- nv_icmd(dev, 0x0002c6, 0x00000000);
- nv_icmd(dev, 0x0002c7, 0x00000000);
- nv_icmd(dev, 0x0002c8, 0x00000000);
- nv_icmd(dev, 0x0002c9, 0x00000000);
- nv_icmd(dev, 0x0002ca, 0x00000000);
- nv_icmd(dev, 0x0002cb, 0x00000000);
- nv_icmd(dev, 0x0002cc, 0x00000000);
- nv_icmd(dev, 0x0002cd, 0x00000000);
- nv_icmd(dev, 0x0002ce, 0x00000000);
- nv_icmd(dev, 0x0002cf, 0x00000000);
- nv_icmd(dev, 0x0004d0, 0x00000000);
- nv_icmd(dev, 0x0004d1, 0x00000000);
- nv_icmd(dev, 0x0004d2, 0x00000000);
- nv_icmd(dev, 0x0004d3, 0x00000000);
- nv_icmd(dev, 0x0004d4, 0x00000000);
- nv_icmd(dev, 0x0004d5, 0x00000000);
- nv_icmd(dev, 0x0004d6, 0x00000000);
- nv_icmd(dev, 0x0004d7, 0x00000000);
- nv_icmd(dev, 0x0004d8, 0x00000000);
- nv_icmd(dev, 0x0004d9, 0x00000000);
- nv_icmd(dev, 0x0004da, 0x00000000);
- nv_icmd(dev, 0x0004db, 0x00000000);
- nv_icmd(dev, 0x0004dc, 0x00000000);
- nv_icmd(dev, 0x0004dd, 0x00000000);
- nv_icmd(dev, 0x0004de, 0x00000000);
- nv_icmd(dev, 0x0004df, 0x00000000);
- nv_icmd(dev, 0x000720, 0x00000000);
- nv_icmd(dev, 0x000721, 0x00000000);
- nv_icmd(dev, 0x000722, 0x00000000);
- nv_icmd(dev, 0x000723, 0x00000000);
- nv_icmd(dev, 0x000724, 0x00000000);
- nv_icmd(dev, 0x000725, 0x00000000);
- nv_icmd(dev, 0x000726, 0x00000000);
- nv_icmd(dev, 0x000727, 0x00000000);
- nv_icmd(dev, 0x000728, 0x00000000);
- nv_icmd(dev, 0x000729, 0x00000000);
- nv_icmd(dev, 0x00072a, 0x00000000);
- nv_icmd(dev, 0x00072b, 0x00000000);
- nv_icmd(dev, 0x00072c, 0x00000000);
- nv_icmd(dev, 0x00072d, 0x00000000);
- nv_icmd(dev, 0x00072e, 0x00000000);
- nv_icmd(dev, 0x00072f, 0x00000000);
- nv_icmd(dev, 0x0008c0, 0x00000000);
- nv_icmd(dev, 0x0008c1, 0x00000000);
- nv_icmd(dev, 0x0008c2, 0x00000000);
- nv_icmd(dev, 0x0008c3, 0x00000000);
- nv_icmd(dev, 0x0008c4, 0x00000000);
- nv_icmd(dev, 0x0008c5, 0x00000000);
- nv_icmd(dev, 0x0008c6, 0x00000000);
- nv_icmd(dev, 0x0008c7, 0x00000000);
- nv_icmd(dev, 0x0008c8, 0x00000000);
- nv_icmd(dev, 0x0008c9, 0x00000000);
- nv_icmd(dev, 0x0008ca, 0x00000000);
- nv_icmd(dev, 0x0008cb, 0x00000000);
- nv_icmd(dev, 0x0008cc, 0x00000000);
- nv_icmd(dev, 0x0008cd, 0x00000000);
- nv_icmd(dev, 0x0008ce, 0x00000000);
- nv_icmd(dev, 0x0008cf, 0x00000000);
- nv_icmd(dev, 0x000890, 0x00000000);
- nv_icmd(dev, 0x000891, 0x00000000);
- nv_icmd(dev, 0x000892, 0x00000000);
- nv_icmd(dev, 0x000893, 0x00000000);
- nv_icmd(dev, 0x000894, 0x00000000);
- nv_icmd(dev, 0x000895, 0x00000000);
- nv_icmd(dev, 0x000896, 0x00000000);
- nv_icmd(dev, 0x000897, 0x00000000);
- nv_icmd(dev, 0x000898, 0x00000000);
- nv_icmd(dev, 0x000899, 0x00000000);
- nv_icmd(dev, 0x00089a, 0x00000000);
- nv_icmd(dev, 0x00089b, 0x00000000);
- nv_icmd(dev, 0x00089c, 0x00000000);
- nv_icmd(dev, 0x00089d, 0x00000000);
- nv_icmd(dev, 0x00089e, 0x00000000);
- nv_icmd(dev, 0x00089f, 0x00000000);
- nv_icmd(dev, 0x0008e0, 0x00000000);
- nv_icmd(dev, 0x0008e1, 0x00000000);
- nv_icmd(dev, 0x0008e2, 0x00000000);
- nv_icmd(dev, 0x0008e3, 0x00000000);
- nv_icmd(dev, 0x0008e4, 0x00000000);
- nv_icmd(dev, 0x0008e5, 0x00000000);
- nv_icmd(dev, 0x0008e6, 0x00000000);
- nv_icmd(dev, 0x0008e7, 0x00000000);
- nv_icmd(dev, 0x0008e8, 0x00000000);
- nv_icmd(dev, 0x0008e9, 0x00000000);
- nv_icmd(dev, 0x0008ea, 0x00000000);
- nv_icmd(dev, 0x0008eb, 0x00000000);
- nv_icmd(dev, 0x0008ec, 0x00000000);
- nv_icmd(dev, 0x0008ed, 0x00000000);
- nv_icmd(dev, 0x0008ee, 0x00000000);
- nv_icmd(dev, 0x0008ef, 0x00000000);
- nv_icmd(dev, 0x0008a0, 0x00000000);
- nv_icmd(dev, 0x0008a1, 0x00000000);
- nv_icmd(dev, 0x0008a2, 0x00000000);
- nv_icmd(dev, 0x0008a3, 0x00000000);
- nv_icmd(dev, 0x0008a4, 0x00000000);
- nv_icmd(dev, 0x0008a5, 0x00000000);
- nv_icmd(dev, 0x0008a6, 0x00000000);
- nv_icmd(dev, 0x0008a7, 0x00000000);
- nv_icmd(dev, 0x0008a8, 0x00000000);
- nv_icmd(dev, 0x0008a9, 0x00000000);
- nv_icmd(dev, 0x0008aa, 0x00000000);
- nv_icmd(dev, 0x0008ab, 0x00000000);
- nv_icmd(dev, 0x0008ac, 0x00000000);
- nv_icmd(dev, 0x0008ad, 0x00000000);
- nv_icmd(dev, 0x0008ae, 0x00000000);
- nv_icmd(dev, 0x0008af, 0x00000000);
- nv_icmd(dev, 0x0008f0, 0x00000000);
- nv_icmd(dev, 0x0008f1, 0x00000000);
- nv_icmd(dev, 0x0008f2, 0x00000000);
- nv_icmd(dev, 0x0008f3, 0x00000000);
- nv_icmd(dev, 0x0008f4, 0x00000000);
- nv_icmd(dev, 0x0008f5, 0x00000000);
- nv_icmd(dev, 0x0008f6, 0x00000000);
- nv_icmd(dev, 0x0008f7, 0x00000000);
- nv_icmd(dev, 0x0008f8, 0x00000000);
- nv_icmd(dev, 0x0008f9, 0x00000000);
- nv_icmd(dev, 0x0008fa, 0x00000000);
- nv_icmd(dev, 0x0008fb, 0x00000000);
- nv_icmd(dev, 0x0008fc, 0x00000000);
- nv_icmd(dev, 0x0008fd, 0x00000000);
- nv_icmd(dev, 0x0008fe, 0x00000000);
- nv_icmd(dev, 0x0008ff, 0x00000000);
- nv_icmd(dev, 0x00094c, 0x000000ff);
- nv_icmd(dev, 0x00094d, 0xffffffff);
- nv_icmd(dev, 0x00094e, 0x00000002);
- nv_icmd(dev, 0x0002ec, 0x00000001);
- nv_icmd(dev, 0x000303, 0x00000001);
- nv_icmd(dev, 0x0002e6, 0x00000001);
- nv_icmd(dev, 0x000466, 0x00000052);
- nv_icmd(dev, 0x000301, 0x3f800000);
- nv_icmd(dev, 0x000304, 0x30201000);
- nv_icmd(dev, 0x000305, 0x70605040);
- nv_icmd(dev, 0x000306, 0xb8a89888);
- nv_icmd(dev, 0x000307, 0xf8e8d8c8);
- nv_icmd(dev, 0x00030a, 0x00ffff00);
- nv_icmd(dev, 0x00030b, 0x0000001a);
- nv_icmd(dev, 0x00030c, 0x00000001);
- nv_icmd(dev, 0x000318, 0x00000001);
- nv_icmd(dev, 0x000340, 0x00000000);
- nv_icmd(dev, 0x000375, 0x00000001);
- nv_icmd(dev, 0x00037d, 0x00000006);
- nv_icmd(dev, 0x0003a0, 0x00000002);
- nv_icmd(dev, 0x0003aa, 0x00000001);
- nv_icmd(dev, 0x0003a9, 0x00000001);
- nv_icmd(dev, 0x000380, 0x00000001);
- nv_icmd(dev, 0x000383, 0x00000011);
- nv_icmd(dev, 0x000360, 0x00000040);
- nv_icmd(dev, 0x000366, 0x00000000);
- nv_icmd(dev, 0x000367, 0x00000000);
- nv_icmd(dev, 0x000368, 0x00000fff);
- nv_icmd(dev, 0x000370, 0x00000000);
- nv_icmd(dev, 0x000371, 0x00000000);
- nv_icmd(dev, 0x000372, 0x000fffff);
- nv_icmd(dev, 0x00037a, 0x00000012);
- nv_icmd(dev, 0x000619, 0x00000003);
- nv_icmd(dev, 0x000811, 0x00000003);
- nv_icmd(dev, 0x000812, 0x00000004);
- nv_icmd(dev, 0x000813, 0x00000006);
- nv_icmd(dev, 0x000814, 0x00000008);
- nv_icmd(dev, 0x000815, 0x0000000b);
- nv_icmd(dev, 0x000800, 0x00000001);
- nv_icmd(dev, 0x000801, 0x00000001);
- nv_icmd(dev, 0x000802, 0x00000001);
- nv_icmd(dev, 0x000803, 0x00000001);
- nv_icmd(dev, 0x000804, 0x00000001);
- nv_icmd(dev, 0x000805, 0x00000001);
- nv_icmd(dev, 0x000632, 0x00000001);
- nv_icmd(dev, 0x000633, 0x00000002);
- nv_icmd(dev, 0x000634, 0x00000003);
- nv_icmd(dev, 0x000635, 0x00000004);
- nv_icmd(dev, 0x000654, 0x3f800000);
- nv_icmd(dev, 0x000657, 0x3f800000);
- nv_icmd(dev, 0x000655, 0x3f800000);
- nv_icmd(dev, 0x000656, 0x3f800000);
- nv_icmd(dev, 0x0006cd, 0x3f800000);
- nv_icmd(dev, 0x0007f5, 0x3f800000);
- nv_icmd(dev, 0x0007dc, 0x39291909);
- nv_icmd(dev, 0x0007dd, 0x79695949);
- nv_icmd(dev, 0x0007de, 0xb9a99989);
- nv_icmd(dev, 0x0007df, 0xf9e9d9c9);
- nv_icmd(dev, 0x0007e8, 0x00003210);
- nv_icmd(dev, 0x0007e9, 0x00007654);
- nv_icmd(dev, 0x0007ea, 0x00000098);
- nv_icmd(dev, 0x0007ec, 0x39291909);
- nv_icmd(dev, 0x0007ed, 0x79695949);
- nv_icmd(dev, 0x0007ee, 0xb9a99989);
- nv_icmd(dev, 0x0007ef, 0xf9e9d9c9);
- nv_icmd(dev, 0x0007f0, 0x00003210);
- nv_icmd(dev, 0x0007f1, 0x00007654);
- nv_icmd(dev, 0x0007f2, 0x00000098);
- nv_icmd(dev, 0x0005a5, 0x00000001);
- nv_icmd(dev, 0x000980, 0x00000000);
- nv_icmd(dev, 0x000981, 0x00000000);
- nv_icmd(dev, 0x000982, 0x00000000);
- nv_icmd(dev, 0x000983, 0x00000000);
- nv_icmd(dev, 0x000984, 0x00000000);
- nv_icmd(dev, 0x000985, 0x00000000);
- nv_icmd(dev, 0x000986, 0x00000000);
- nv_icmd(dev, 0x000987, 0x00000000);
- nv_icmd(dev, 0x000988, 0x00000000);
- nv_icmd(dev, 0x000989, 0x00000000);
- nv_icmd(dev, 0x00098a, 0x00000000);
- nv_icmd(dev, 0x00098b, 0x00000000);
- nv_icmd(dev, 0x00098c, 0x00000000);
- nv_icmd(dev, 0x00098d, 0x00000000);
- nv_icmd(dev, 0x00098e, 0x00000000);
- nv_icmd(dev, 0x00098f, 0x00000000);
- nv_icmd(dev, 0x000990, 0x00000000);
- nv_icmd(dev, 0x000991, 0x00000000);
- nv_icmd(dev, 0x000992, 0x00000000);
- nv_icmd(dev, 0x000993, 0x00000000);
- nv_icmd(dev, 0x000994, 0x00000000);
- nv_icmd(dev, 0x000995, 0x00000000);
- nv_icmd(dev, 0x000996, 0x00000000);
- nv_icmd(dev, 0x000997, 0x00000000);
- nv_icmd(dev, 0x000998, 0x00000000);
- nv_icmd(dev, 0x000999, 0x00000000);
- nv_icmd(dev, 0x00099a, 0x00000000);
- nv_icmd(dev, 0x00099b, 0x00000000);
- nv_icmd(dev, 0x00099c, 0x00000000);
- nv_icmd(dev, 0x00099d, 0x00000000);
- nv_icmd(dev, 0x00099e, 0x00000000);
- nv_icmd(dev, 0x00099f, 0x00000000);
- nv_icmd(dev, 0x0009a0, 0x00000000);
- nv_icmd(dev, 0x0009a1, 0x00000000);
- nv_icmd(dev, 0x0009a2, 0x00000000);
- nv_icmd(dev, 0x0009a3, 0x00000000);
- nv_icmd(dev, 0x0009a4, 0x00000000);
- nv_icmd(dev, 0x0009a5, 0x00000000);
- nv_icmd(dev, 0x0009a6, 0x00000000);
- nv_icmd(dev, 0x0009a7, 0x00000000);
- nv_icmd(dev, 0x0009a8, 0x00000000);
- nv_icmd(dev, 0x0009a9, 0x00000000);
- nv_icmd(dev, 0x0009aa, 0x00000000);
- nv_icmd(dev, 0x0009ab, 0x00000000);
- nv_icmd(dev, 0x0009ac, 0x00000000);
- nv_icmd(dev, 0x0009ad, 0x00000000);
- nv_icmd(dev, 0x0009ae, 0x00000000);
- nv_icmd(dev, 0x0009af, 0x00000000);
- nv_icmd(dev, 0x0009b0, 0x00000000);
- nv_icmd(dev, 0x0009b1, 0x00000000);
- nv_icmd(dev, 0x0009b2, 0x00000000);
- nv_icmd(dev, 0x0009b3, 0x00000000);
- nv_icmd(dev, 0x0009b4, 0x00000000);
- nv_icmd(dev, 0x0009b5, 0x00000000);
- nv_icmd(dev, 0x0009b6, 0x00000000);
- nv_icmd(dev, 0x0009b7, 0x00000000);
- nv_icmd(dev, 0x0009b8, 0x00000000);
- nv_icmd(dev, 0x0009b9, 0x00000000);
- nv_icmd(dev, 0x0009ba, 0x00000000);
- nv_icmd(dev, 0x0009bb, 0x00000000);
- nv_icmd(dev, 0x0009bc, 0x00000000);
- nv_icmd(dev, 0x0009bd, 0x00000000);
- nv_icmd(dev, 0x0009be, 0x00000000);
- nv_icmd(dev, 0x0009bf, 0x00000000);
- nv_icmd(dev, 0x0009c0, 0x00000000);
- nv_icmd(dev, 0x0009c1, 0x00000000);
- nv_icmd(dev, 0x0009c2, 0x00000000);
- nv_icmd(dev, 0x0009c3, 0x00000000);
- nv_icmd(dev, 0x0009c4, 0x00000000);
- nv_icmd(dev, 0x0009c5, 0x00000000);
- nv_icmd(dev, 0x0009c6, 0x00000000);
- nv_icmd(dev, 0x0009c7, 0x00000000);
- nv_icmd(dev, 0x0009c8, 0x00000000);
- nv_icmd(dev, 0x0009c9, 0x00000000);
- nv_icmd(dev, 0x0009ca, 0x00000000);
- nv_icmd(dev, 0x0009cb, 0x00000000);
- nv_icmd(dev, 0x0009cc, 0x00000000);
- nv_icmd(dev, 0x0009cd, 0x00000000);
- nv_icmd(dev, 0x0009ce, 0x00000000);
- nv_icmd(dev, 0x0009cf, 0x00000000);
- nv_icmd(dev, 0x0009d0, 0x00000000);
- nv_icmd(dev, 0x0009d1, 0x00000000);
- nv_icmd(dev, 0x0009d2, 0x00000000);
- nv_icmd(dev, 0x0009d3, 0x00000000);
- nv_icmd(dev, 0x0009d4, 0x00000000);
- nv_icmd(dev, 0x0009d5, 0x00000000);
- nv_icmd(dev, 0x0009d6, 0x00000000);
- nv_icmd(dev, 0x0009d7, 0x00000000);
- nv_icmd(dev, 0x0009d8, 0x00000000);
- nv_icmd(dev, 0x0009d9, 0x00000000);
- nv_icmd(dev, 0x0009da, 0x00000000);
- nv_icmd(dev, 0x0009db, 0x00000000);
- nv_icmd(dev, 0x0009dc, 0x00000000);
- nv_icmd(dev, 0x0009dd, 0x00000000);
- nv_icmd(dev, 0x0009de, 0x00000000);
- nv_icmd(dev, 0x0009df, 0x00000000);
- nv_icmd(dev, 0x0009e0, 0x00000000);
- nv_icmd(dev, 0x0009e1, 0x00000000);
- nv_icmd(dev, 0x0009e2, 0x00000000);
- nv_icmd(dev, 0x0009e3, 0x00000000);
- nv_icmd(dev, 0x0009e4, 0x00000000);
- nv_icmd(dev, 0x0009e5, 0x00000000);
- nv_icmd(dev, 0x0009e6, 0x00000000);
- nv_icmd(dev, 0x0009e7, 0x00000000);
- nv_icmd(dev, 0x0009e8, 0x00000000);
- nv_icmd(dev, 0x0009e9, 0x00000000);
- nv_icmd(dev, 0x0009ea, 0x00000000);
- nv_icmd(dev, 0x0009eb, 0x00000000);
- nv_icmd(dev, 0x0009ec, 0x00000000);
- nv_icmd(dev, 0x0009ed, 0x00000000);
- nv_icmd(dev, 0x0009ee, 0x00000000);
- nv_icmd(dev, 0x0009ef, 0x00000000);
- nv_icmd(dev, 0x0009f0, 0x00000000);
- nv_icmd(dev, 0x0009f1, 0x00000000);
- nv_icmd(dev, 0x0009f2, 0x00000000);
- nv_icmd(dev, 0x0009f3, 0x00000000);
- nv_icmd(dev, 0x0009f4, 0x00000000);
- nv_icmd(dev, 0x0009f5, 0x00000000);
- nv_icmd(dev, 0x0009f6, 0x00000000);
- nv_icmd(dev, 0x0009f7, 0x00000000);
- nv_icmd(dev, 0x0009f8, 0x00000000);
- nv_icmd(dev, 0x0009f9, 0x00000000);
- nv_icmd(dev, 0x0009fa, 0x00000000);
- nv_icmd(dev, 0x0009fb, 0x00000000);
- nv_icmd(dev, 0x0009fc, 0x00000000);
- nv_icmd(dev, 0x0009fd, 0x00000000);
- nv_icmd(dev, 0x0009fe, 0x00000000);
- nv_icmd(dev, 0x0009ff, 0x00000000);
- nv_icmd(dev, 0x000468, 0x00000004);
- nv_icmd(dev, 0x00046c, 0x00000001);
- nv_icmd(dev, 0x000470, 0x00000000);
- nv_icmd(dev, 0x000471, 0x00000000);
- nv_icmd(dev, 0x000472, 0x00000000);
- nv_icmd(dev, 0x000473, 0x00000000);
- nv_icmd(dev, 0x000474, 0x00000000);
- nv_icmd(dev, 0x000475, 0x00000000);
- nv_icmd(dev, 0x000476, 0x00000000);
- nv_icmd(dev, 0x000477, 0x00000000);
- nv_icmd(dev, 0x000478, 0x00000000);
- nv_icmd(dev, 0x000479, 0x00000000);
- nv_icmd(dev, 0x00047a, 0x00000000);
- nv_icmd(dev, 0x00047b, 0x00000000);
- nv_icmd(dev, 0x00047c, 0x00000000);
- nv_icmd(dev, 0x00047d, 0x00000000);
- nv_icmd(dev, 0x00047e, 0x00000000);
- nv_icmd(dev, 0x00047f, 0x00000000);
- nv_icmd(dev, 0x000480, 0x00000000);
- nv_icmd(dev, 0x000481, 0x00000000);
- nv_icmd(dev, 0x000482, 0x00000000);
- nv_icmd(dev, 0x000483, 0x00000000);
- nv_icmd(dev, 0x000484, 0x00000000);
- nv_icmd(dev, 0x000485, 0x00000000);
- nv_icmd(dev, 0x000486, 0x00000000);
- nv_icmd(dev, 0x000487, 0x00000000);
- nv_icmd(dev, 0x000488, 0x00000000);
- nv_icmd(dev, 0x000489, 0x00000000);
- nv_icmd(dev, 0x00048a, 0x00000000);
- nv_icmd(dev, 0x00048b, 0x00000000);
- nv_icmd(dev, 0x00048c, 0x00000000);
- nv_icmd(dev, 0x00048d, 0x00000000);
- nv_icmd(dev, 0x00048e, 0x00000000);
- nv_icmd(dev, 0x00048f, 0x00000000);
- nv_icmd(dev, 0x000490, 0x00000000);
- nv_icmd(dev, 0x000491, 0x00000000);
- nv_icmd(dev, 0x000492, 0x00000000);
- nv_icmd(dev, 0x000493, 0x00000000);
- nv_icmd(dev, 0x000494, 0x00000000);
- nv_icmd(dev, 0x000495, 0x00000000);
- nv_icmd(dev, 0x000496, 0x00000000);
- nv_icmd(dev, 0x000497, 0x00000000);
- nv_icmd(dev, 0x000498, 0x00000000);
- nv_icmd(dev, 0x000499, 0x00000000);
- nv_icmd(dev, 0x00049a, 0x00000000);
- nv_icmd(dev, 0x00049b, 0x00000000);
- nv_icmd(dev, 0x00049c, 0x00000000);
- nv_icmd(dev, 0x00049d, 0x00000000);
- nv_icmd(dev, 0x00049e, 0x00000000);
- nv_icmd(dev, 0x00049f, 0x00000000);
- nv_icmd(dev, 0x0004a0, 0x00000000);
- nv_icmd(dev, 0x0004a1, 0x00000000);
- nv_icmd(dev, 0x0004a2, 0x00000000);
- nv_icmd(dev, 0x0004a3, 0x00000000);
- nv_icmd(dev, 0x0004a4, 0x00000000);
- nv_icmd(dev, 0x0004a5, 0x00000000);
- nv_icmd(dev, 0x0004a6, 0x00000000);
- nv_icmd(dev, 0x0004a7, 0x00000000);
- nv_icmd(dev, 0x0004a8, 0x00000000);
- nv_icmd(dev, 0x0004a9, 0x00000000);
- nv_icmd(dev, 0x0004aa, 0x00000000);
- nv_icmd(dev, 0x0004ab, 0x00000000);
- nv_icmd(dev, 0x0004ac, 0x00000000);
- nv_icmd(dev, 0x0004ad, 0x00000000);
- nv_icmd(dev, 0x0004ae, 0x00000000);
- nv_icmd(dev, 0x0004af, 0x00000000);
- nv_icmd(dev, 0x0004b0, 0x00000000);
- nv_icmd(dev, 0x0004b1, 0x00000000);
- nv_icmd(dev, 0x0004b2, 0x00000000);
- nv_icmd(dev, 0x0004b3, 0x00000000);
- nv_icmd(dev, 0x0004b4, 0x00000000);
- nv_icmd(dev, 0x0004b5, 0x00000000);
- nv_icmd(dev, 0x0004b6, 0x00000000);
- nv_icmd(dev, 0x0004b7, 0x00000000);
- nv_icmd(dev, 0x0004b8, 0x00000000);
- nv_icmd(dev, 0x0004b9, 0x00000000);
- nv_icmd(dev, 0x0004ba, 0x00000000);
- nv_icmd(dev, 0x0004bb, 0x00000000);
- nv_icmd(dev, 0x0004bc, 0x00000000);
- nv_icmd(dev, 0x0004bd, 0x00000000);
- nv_icmd(dev, 0x0004be, 0x00000000);
- nv_icmd(dev, 0x0004bf, 0x00000000);
- nv_icmd(dev, 0x0004c0, 0x00000000);
- nv_icmd(dev, 0x0004c1, 0x00000000);
- nv_icmd(dev, 0x0004c2, 0x00000000);
- nv_icmd(dev, 0x0004c3, 0x00000000);
- nv_icmd(dev, 0x0004c4, 0x00000000);
- nv_icmd(dev, 0x0004c5, 0x00000000);
- nv_icmd(dev, 0x0004c6, 0x00000000);
- nv_icmd(dev, 0x0004c7, 0x00000000);
- nv_icmd(dev, 0x0004c8, 0x00000000);
- nv_icmd(dev, 0x0004c9, 0x00000000);
- nv_icmd(dev, 0x0004ca, 0x00000000);
- nv_icmd(dev, 0x0004cb, 0x00000000);
- nv_icmd(dev, 0x0004cc, 0x00000000);
- nv_icmd(dev, 0x0004cd, 0x00000000);
- nv_icmd(dev, 0x0004ce, 0x00000000);
- nv_icmd(dev, 0x0004cf, 0x00000000);
- nv_icmd(dev, 0x000510, 0x3f800000);
- nv_icmd(dev, 0x000511, 0x3f800000);
- nv_icmd(dev, 0x000512, 0x3f800000);
- nv_icmd(dev, 0x000513, 0x3f800000);
- nv_icmd(dev, 0x000514, 0x3f800000);
- nv_icmd(dev, 0x000515, 0x3f800000);
- nv_icmd(dev, 0x000516, 0x3f800000);
- nv_icmd(dev, 0x000517, 0x3f800000);
- nv_icmd(dev, 0x000518, 0x3f800000);
- nv_icmd(dev, 0x000519, 0x3f800000);
- nv_icmd(dev, 0x00051a, 0x3f800000);
- nv_icmd(dev, 0x00051b, 0x3f800000);
- nv_icmd(dev, 0x00051c, 0x3f800000);
- nv_icmd(dev, 0x00051d, 0x3f800000);
- nv_icmd(dev, 0x00051e, 0x3f800000);
- nv_icmd(dev, 0x00051f, 0x3f800000);
- nv_icmd(dev, 0x000520, 0x000002b6);
- nv_icmd(dev, 0x000529, 0x00000001);
- nv_icmd(dev, 0x000530, 0xffff0000);
- nv_icmd(dev, 0x000531, 0xffff0000);
- nv_icmd(dev, 0x000532, 0xffff0000);
- nv_icmd(dev, 0x000533, 0xffff0000);
- nv_icmd(dev, 0x000534, 0xffff0000);
- nv_icmd(dev, 0x000535, 0xffff0000);
- nv_icmd(dev, 0x000536, 0xffff0000);
- nv_icmd(dev, 0x000537, 0xffff0000);
- nv_icmd(dev, 0x000538, 0xffff0000);
- nv_icmd(dev, 0x000539, 0xffff0000);
- nv_icmd(dev, 0x00053a, 0xffff0000);
- nv_icmd(dev, 0x00053b, 0xffff0000);
- nv_icmd(dev, 0x00053c, 0xffff0000);
- nv_icmd(dev, 0x00053d, 0xffff0000);
- nv_icmd(dev, 0x00053e, 0xffff0000);
- nv_icmd(dev, 0x00053f, 0xffff0000);
- nv_icmd(dev, 0x000585, 0x0000003f);
- nv_icmd(dev, 0x000576, 0x00000003);
- nv_icmd(dev, 0x00057b, 0x00000059);
- nv_icmd(dev, 0x000586, 0x00000040);
- nv_icmd(dev, 0x000582, 0x00000080);
- nv_icmd(dev, 0x000583, 0x00000080);
- nv_icmd(dev, 0x0005c2, 0x00000001);
- nv_icmd(dev, 0x000638, 0x00000001);
- nv_icmd(dev, 0x000639, 0x00000001);
- nv_icmd(dev, 0x00063a, 0x00000002);
- nv_icmd(dev, 0x00063b, 0x00000001);
- nv_icmd(dev, 0x00063c, 0x00000001);
- nv_icmd(dev, 0x00063d, 0x00000002);
- nv_icmd(dev, 0x00063e, 0x00000001);
- nv_icmd(dev, 0x0008b8, 0x00000001);
- nv_icmd(dev, 0x0008b9, 0x00000001);
- nv_icmd(dev, 0x0008ba, 0x00000001);
- nv_icmd(dev, 0x0008bb, 0x00000001);
- nv_icmd(dev, 0x0008bc, 0x00000001);
- nv_icmd(dev, 0x0008bd, 0x00000001);
- nv_icmd(dev, 0x0008be, 0x00000001);
- nv_icmd(dev, 0x0008bf, 0x00000001);
- nv_icmd(dev, 0x000900, 0x00000001);
- nv_icmd(dev, 0x000901, 0x00000001);
- nv_icmd(dev, 0x000902, 0x00000001);
- nv_icmd(dev, 0x000903, 0x00000001);
- nv_icmd(dev, 0x000904, 0x00000001);
- nv_icmd(dev, 0x000905, 0x00000001);
- nv_icmd(dev, 0x000906, 0x00000001);
- nv_icmd(dev, 0x000907, 0x00000001);
- nv_icmd(dev, 0x000908, 0x00000002);
- nv_icmd(dev, 0x000909, 0x00000002);
- nv_icmd(dev, 0x00090a, 0x00000002);
- nv_icmd(dev, 0x00090b, 0x00000002);
- nv_icmd(dev, 0x00090c, 0x00000002);
- nv_icmd(dev, 0x00090d, 0x00000002);
- nv_icmd(dev, 0x00090e, 0x00000002);
- nv_icmd(dev, 0x00090f, 0x00000002);
- nv_icmd(dev, 0x000910, 0x00000001);
- nv_icmd(dev, 0x000911, 0x00000001);
- nv_icmd(dev, 0x000912, 0x00000001);
- nv_icmd(dev, 0x000913, 0x00000001);
- nv_icmd(dev, 0x000914, 0x00000001);
- nv_icmd(dev, 0x000915, 0x00000001);
- nv_icmd(dev, 0x000916, 0x00000001);
- nv_icmd(dev, 0x000917, 0x00000001);
- nv_icmd(dev, 0x000918, 0x00000001);
- nv_icmd(dev, 0x000919, 0x00000001);
- nv_icmd(dev, 0x00091a, 0x00000001);
- nv_icmd(dev, 0x00091b, 0x00000001);
- nv_icmd(dev, 0x00091c, 0x00000001);
- nv_icmd(dev, 0x00091d, 0x00000001);
- nv_icmd(dev, 0x00091e, 0x00000001);
- nv_icmd(dev, 0x00091f, 0x00000001);
- nv_icmd(dev, 0x000920, 0x00000002);
- nv_icmd(dev, 0x000921, 0x00000002);
- nv_icmd(dev, 0x000922, 0x00000002);
- nv_icmd(dev, 0x000923, 0x00000002);
- nv_icmd(dev, 0x000924, 0x00000002);
- nv_icmd(dev, 0x000925, 0x00000002);
- nv_icmd(dev, 0x000926, 0x00000002);
- nv_icmd(dev, 0x000927, 0x00000002);
- nv_icmd(dev, 0x000928, 0x00000001);
- nv_icmd(dev, 0x000929, 0x00000001);
- nv_icmd(dev, 0x00092a, 0x00000001);
- nv_icmd(dev, 0x00092b, 0x00000001);
- nv_icmd(dev, 0x00092c, 0x00000001);
- nv_icmd(dev, 0x00092d, 0x00000001);
- nv_icmd(dev, 0x00092e, 0x00000001);
- nv_icmd(dev, 0x00092f, 0x00000001);
- nv_icmd(dev, 0x000648, 0x00000001);
- nv_icmd(dev, 0x000649, 0x00000001);
- nv_icmd(dev, 0x00064a, 0x00000001);
- nv_icmd(dev, 0x00064b, 0x00000001);
- nv_icmd(dev, 0x00064c, 0x00000001);
- nv_icmd(dev, 0x00064d, 0x00000001);
- nv_icmd(dev, 0x00064e, 0x00000001);
- nv_icmd(dev, 0x00064f, 0x00000001);
- nv_icmd(dev, 0x000650, 0x00000001);
- nv_icmd(dev, 0x000658, 0x0000000f);
- nv_icmd(dev, 0x0007ff, 0x0000000a);
- nv_icmd(dev, 0x00066a, 0x40000000);
- nv_icmd(dev, 0x00066b, 0x10000000);
- nv_icmd(dev, 0x00066c, 0xffff0000);
- nv_icmd(dev, 0x00066d, 0xffff0000);
- nv_icmd(dev, 0x0007af, 0x00000008);
- nv_icmd(dev, 0x0007b0, 0x00000008);
- nv_icmd(dev, 0x0007f6, 0x00000001);
- nv_icmd(dev, 0x0006b2, 0x00000055);
- nv_icmd(dev, 0x0007ad, 0x00000003);
- nv_icmd(dev, 0x000937, 0x00000001);
- nv_icmd(dev, 0x000971, 0x00000008);
- nv_icmd(dev, 0x000972, 0x00000040);
- nv_icmd(dev, 0x000973, 0x0000012c);
- nv_icmd(dev, 0x00097c, 0x00000040);
- nv_icmd(dev, 0x000979, 0x00000003);
- nv_icmd(dev, 0x000975, 0x00000020);
- nv_icmd(dev, 0x000976, 0x00000001);
- nv_icmd(dev, 0x000977, 0x00000020);
- nv_icmd(dev, 0x000978, 0x00000001);
- nv_icmd(dev, 0x000957, 0x00000003);
- nv_icmd(dev, 0x00095e, 0x20164010);
- nv_icmd(dev, 0x00095f, 0x00000020);
- nv_icmd(dev, 0x00097d, 0x00000020);
- nv_icmd(dev, 0x000683, 0x00000006);
- nv_icmd(dev, 0x000685, 0x003fffff);
- nv_icmd(dev, 0x000687, 0x003fffff);
- nv_icmd(dev, 0x0006a0, 0x00000005);
- nv_icmd(dev, 0x000840, 0x00400008);
- nv_icmd(dev, 0x000841, 0x08000080);
- nv_icmd(dev, 0x000842, 0x00400008);
- nv_icmd(dev, 0x000843, 0x08000080);
- nv_icmd(dev, 0x000818, 0x00000000);
- nv_icmd(dev, 0x000819, 0x00000000);
- nv_icmd(dev, 0x00081a, 0x00000000);
- nv_icmd(dev, 0x00081b, 0x00000000);
- nv_icmd(dev, 0x00081c, 0x00000000);
- nv_icmd(dev, 0x00081d, 0x00000000);
- nv_icmd(dev, 0x00081e, 0x00000000);
- nv_icmd(dev, 0x00081f, 0x00000000);
- nv_icmd(dev, 0x000848, 0x00000000);
- nv_icmd(dev, 0x000849, 0x00000000);
- nv_icmd(dev, 0x00084a, 0x00000000);
- nv_icmd(dev, 0x00084b, 0x00000000);
- nv_icmd(dev, 0x00084c, 0x00000000);
- nv_icmd(dev, 0x00084d, 0x00000000);
- nv_icmd(dev, 0x00084e, 0x00000000);
- nv_icmd(dev, 0x00084f, 0x00000000);
- nv_icmd(dev, 0x000850, 0x00000000);
- nv_icmd(dev, 0x000851, 0x00000000);
- nv_icmd(dev, 0x000852, 0x00000000);
- nv_icmd(dev, 0x000853, 0x00000000);
- nv_icmd(dev, 0x000854, 0x00000000);
- nv_icmd(dev, 0x000855, 0x00000000);
- nv_icmd(dev, 0x000856, 0x00000000);
- nv_icmd(dev, 0x000857, 0x00000000);
- nv_icmd(dev, 0x000738, 0x00000000);
- nv_icmd(dev, 0x0006aa, 0x00000001);
- nv_icmd(dev, 0x0006ab, 0x00000002);
- nv_icmd(dev, 0x0006ac, 0x00000080);
- nv_icmd(dev, 0x0006ad, 0x00000100);
- nv_icmd(dev, 0x0006ae, 0x00000100);
- nv_icmd(dev, 0x0006b1, 0x00000011);
- nv_icmd(dev, 0x0006bb, 0x000000cf);
- nv_icmd(dev, 0x0006ce, 0x2a712488);
- nv_icmd(dev, 0x000739, 0x4085c000);
- nv_icmd(dev, 0x00073a, 0x00000080);
- nv_icmd(dev, 0x000786, 0x80000100);
- nv_icmd(dev, 0x00073c, 0x00010100);
- nv_icmd(dev, 0x00073d, 0x02800000);
- nv_icmd(dev, 0x000787, 0x000000cf);
- nv_icmd(dev, 0x00078c, 0x00000008);
- nv_icmd(dev, 0x000792, 0x00000001);
- nv_icmd(dev, 0x000794, 0x00000001);
- nv_icmd(dev, 0x000795, 0x00000001);
- nv_icmd(dev, 0x000796, 0x00000001);
- nv_icmd(dev, 0x000797, 0x000000cf);
- nv_icmd(dev, 0x000836, 0x00000001);
- nv_icmd(dev, 0x00079a, 0x00000002);
- nv_icmd(dev, 0x000833, 0x04444480);
- nv_icmd(dev, 0x0007a1, 0x00000001);
- nv_icmd(dev, 0x0007a3, 0x00000001);
- nv_icmd(dev, 0x0007a4, 0x00000001);
- nv_icmd(dev, 0x0007a5, 0x00000001);
- nv_icmd(dev, 0x000831, 0x00000004);
- nv_icmd(dev, 0x000b07, 0x00000002);
- nv_icmd(dev, 0x000b08, 0x00000100);
- nv_icmd(dev, 0x000b09, 0x00000100);
- nv_icmd(dev, 0x000b0a, 0x00000001);
- nv_icmd(dev, 0x000a04, 0x000000ff);
- nv_icmd(dev, 0x000a0b, 0x00000040);
- nv_icmd(dev, 0x00097f, 0x00000100);
- nv_icmd(dev, 0x000a02, 0x00000001);
- nv_icmd(dev, 0x000809, 0x00000007);
- nv_icmd(dev, 0x00c221, 0x00000040);
- nv_icmd(dev, 0x00c1b0, 0x0000000f);
- nv_icmd(dev, 0x00c1b1, 0x0000000f);
- nv_icmd(dev, 0x00c1b2, 0x0000000f);
- nv_icmd(dev, 0x00c1b3, 0x0000000f);
- nv_icmd(dev, 0x00c1b4, 0x0000000f);
- nv_icmd(dev, 0x00c1b5, 0x0000000f);
- nv_icmd(dev, 0x00c1b6, 0x0000000f);
- nv_icmd(dev, 0x00c1b7, 0x0000000f);
- nv_icmd(dev, 0x00c1b8, 0x0fac6881);
- nv_icmd(dev, 0x00c1b9, 0x00fac688);
- nv_icmd(dev, 0x00c401, 0x00000001);
- nv_icmd(dev, 0x00c402, 0x00010001);
- nv_icmd(dev, 0x00c403, 0x00000001);
- nv_icmd(dev, 0x00c404, 0x00000001);
- nv_icmd(dev, 0x00c40e, 0x00000020);
- nv_icmd(dev, 0x00c500, 0x00000003);
- nv_icmd(dev, 0x01e100, 0x00000001);
- nv_icmd(dev, 0x001000, 0x00000002);
- nv_icmd(dev, 0x0006aa, 0x00000001);
- nv_icmd(dev, 0x0006ad, 0x00000100);
- nv_icmd(dev, 0x0006ae, 0x00000100);
- nv_icmd(dev, 0x0006b1, 0x00000011);
- nv_icmd(dev, 0x00078c, 0x00000008);
- nv_icmd(dev, 0x000792, 0x00000001);
- nv_icmd(dev, 0x000794, 0x00000001);
- nv_icmd(dev, 0x000795, 0x00000001);
- nv_icmd(dev, 0x000796, 0x00000001);
- nv_icmd(dev, 0x000797, 0x000000cf);
- nv_icmd(dev, 0x00079a, 0x00000002);
- nv_icmd(dev, 0x000833, 0x04444480);
- nv_icmd(dev, 0x0007a1, 0x00000001);
- nv_icmd(dev, 0x0007a3, 0x00000001);
- nv_icmd(dev, 0x0007a4, 0x00000001);
- nv_icmd(dev, 0x0007a5, 0x00000001);
- nv_icmd(dev, 0x000831, 0x00000004);
- nv_icmd(dev, 0x01e100, 0x00000001);
- nv_icmd(dev, 0x001000, 0x00000008);
- nv_icmd(dev, 0x000039, 0x00000000);
- nv_icmd(dev, 0x00003a, 0x00000000);
- nv_icmd(dev, 0x00003b, 0x00000000);
- nv_icmd(dev, 0x000380, 0x00000001);
- nv_icmd(dev, 0x000366, 0x00000000);
- nv_icmd(dev, 0x000367, 0x00000000);
- nv_icmd(dev, 0x000368, 0x00000fff);
- nv_icmd(dev, 0x000370, 0x00000000);
- nv_icmd(dev, 0x000371, 0x00000000);
- nv_icmd(dev, 0x000372, 0x000fffff);
- nv_icmd(dev, 0x000813, 0x00000006);
- nv_icmd(dev, 0x000814, 0x00000008);
- nv_icmd(dev, 0x000957, 0x00000003);
- nv_icmd(dev, 0x000818, 0x00000000);
- nv_icmd(dev, 0x000819, 0x00000000);
- nv_icmd(dev, 0x00081a, 0x00000000);
- nv_icmd(dev, 0x00081b, 0x00000000);
- nv_icmd(dev, 0x00081c, 0x00000000);
- nv_icmd(dev, 0x00081d, 0x00000000);
- nv_icmd(dev, 0x00081e, 0x00000000);
- nv_icmd(dev, 0x00081f, 0x00000000);
- nv_icmd(dev, 0x000848, 0x00000000);
- nv_icmd(dev, 0x000849, 0x00000000);
- nv_icmd(dev, 0x00084a, 0x00000000);
- nv_icmd(dev, 0x00084b, 0x00000000);
- nv_icmd(dev, 0x00084c, 0x00000000);
- nv_icmd(dev, 0x00084d, 0x00000000);
- nv_icmd(dev, 0x00084e, 0x00000000);
- nv_icmd(dev, 0x00084f, 0x00000000);
- nv_icmd(dev, 0x000850, 0x00000000);
- nv_icmd(dev, 0x000851, 0x00000000);
- nv_icmd(dev, 0x000852, 0x00000000);
- nv_icmd(dev, 0x000853, 0x00000000);
- nv_icmd(dev, 0x000854, 0x00000000);
- nv_icmd(dev, 0x000855, 0x00000000);
- nv_icmd(dev, 0x000856, 0x00000000);
- nv_icmd(dev, 0x000857, 0x00000000);
- nv_icmd(dev, 0x000738, 0x00000000);
- nv_icmd(dev, 0x000b07, 0x00000002);
- nv_icmd(dev, 0x000b08, 0x00000100);
- nv_icmd(dev, 0x000b09, 0x00000100);
- nv_icmd(dev, 0x000b0a, 0x00000001);
- nv_icmd(dev, 0x000a04, 0x000000ff);
- nv_icmd(dev, 0x00097f, 0x00000100);
- nv_icmd(dev, 0x000a02, 0x00000001);
- nv_icmd(dev, 0x000809, 0x00000007);
- nv_icmd(dev, 0x00c221, 0x00000040);
- nv_icmd(dev, 0x00c401, 0x00000001);
- nv_icmd(dev, 0x00c402, 0x00010001);
- nv_icmd(dev, 0x00c403, 0x00000001);
- nv_icmd(dev, 0x00c404, 0x00000001);
- nv_icmd(dev, 0x00c40e, 0x00000020);
- nv_icmd(dev, 0x00c500, 0x00000003);
- nv_icmd(dev, 0x01e100, 0x00000001);
- nv_icmd(dev, 0x001000, 0x00000001);
- nv_icmd(dev, 0x000b07, 0x00000002);
- nv_icmd(dev, 0x000b08, 0x00000100);
- nv_icmd(dev, 0x000b09, 0x00000100);
- nv_icmd(dev, 0x000b0a, 0x00000001);
- nv_icmd(dev, 0x01e100, 0x00000001);
- nv_wr32(dev, 0x400208, 0x00000000);
-}
-
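The removed function that closes just above is one long run of nv_icmd() writes that loads the initial-context ("golden context") register file one word at a time and then releases it with nv_wr32(dev, 0x400208, 0x00000000). Purely as an illustration of the data in that table, and not the driver's actual code, the long runs of consecutive registers sharing a single value could be expressed as a small table-driven loop. The struct and helper below are hypothetical; only nv_icmd(), the register ranges and the values are taken from the removed code above.

/* Illustrative sketch only: a hypothetical table-driven form of the
 * per-register writes removed above.  nv_icmd() is the driver's own
 * helper; struct icmd_run and icmd_load_defaults() are made up here. */
struct icmd_run {
	u32 addr;   /* first ICMD register of the run */
	u32 count;  /* number of consecutive registers */
	u32 data;   /* value written to each of them */
};

static const struct icmd_run icmd_defaults[] = {
	{ 0x000980, 128, 0x00000000 },  /* 0x980..0x9ff cleared, as above */
	{ 0x000510,  16, 0x3f800000 },  /* 0x510..0x51f set to 1.0f */
	{ 0x000530,  16, 0xffff0000 },  /* 0x530..0x53f */
};

static void
icmd_load_defaults(struct drm_device *dev)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(icmd_defaults); i++)
		for (j = 0; j < icmd_defaults[i].count; j++)
			nv_icmd(dev, icmd_defaults[i].addr + j,
				icmd_defaults[i].data);
}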
-static void
-nv_mthd(struct drm_device *dev, u32 class, u32 mthd, u32 data)
-{
- nv_wr32(dev, 0x40448c, data);
- nv_wr32(dev, 0x404488, 0x80000000 | (mthd << 14) | class);
-}
-
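The small nv_mthd() helper removed just above injects one method into the graphics object: it writes the data word to 0x40448c, then a packed trigger word to 0x404488 with bit 31 set, the method offset shifted into bits 14 and up, and the object class in the low bits. The sketch below only restates that packing and works one value through it; treating bit 31 as a go/busy bit is an assumption, and nv_mthd_pack() is a hypothetical name.

/* Illustration of the packing performed by the removed nv_mthd():
 * bit 31 (assumed to be the trigger/busy bit) | method << 14 | class. */
static u32
nv_mthd_pack(u32 class, u32 mthd)
{
	return 0x80000000 | (mthd << 14) | class;
}

/* Example: the first call below, nv_mthd(dev, 0xa097, 0x0800, 0x00000000),
 * writes 0x00000000 to 0x40448c and nv_mthd_pack(0xa097, 0x0800)
 * == 0x8200a097 to 0x404488. */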
-static void
-nve0_grctx_generate_a097(struct drm_device *dev)
-{
- nv_mthd(dev, 0xa097, 0x0800, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0840, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0880, 0x00000000);
- nv_mthd(dev, 0xa097, 0x08c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0900, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0940, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0980, 0x00000000);
- nv_mthd(dev, 0xa097, 0x09c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0804, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0844, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0884, 0x00000000);
- nv_mthd(dev, 0xa097, 0x08c4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0904, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0944, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0984, 0x00000000);
- nv_mthd(dev, 0xa097, 0x09c4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0808, 0x00000400);
- nv_mthd(dev, 0xa097, 0x0848, 0x00000400);
- nv_mthd(dev, 0xa097, 0x0888, 0x00000400);
- nv_mthd(dev, 0xa097, 0x08c8, 0x00000400);
- nv_mthd(dev, 0xa097, 0x0908, 0x00000400);
- nv_mthd(dev, 0xa097, 0x0948, 0x00000400);
- nv_mthd(dev, 0xa097, 0x0988, 0x00000400);
- nv_mthd(dev, 0xa097, 0x09c8, 0x00000400);
- nv_mthd(dev, 0xa097, 0x080c, 0x00000300);
- nv_mthd(dev, 0xa097, 0x084c, 0x00000300);
- nv_mthd(dev, 0xa097, 0x088c, 0x00000300);
- nv_mthd(dev, 0xa097, 0x08cc, 0x00000300);
- nv_mthd(dev, 0xa097, 0x090c, 0x00000300);
- nv_mthd(dev, 0xa097, 0x094c, 0x00000300);
- nv_mthd(dev, 0xa097, 0x098c, 0x00000300);
- nv_mthd(dev, 0xa097, 0x09cc, 0x00000300);
- nv_mthd(dev, 0xa097, 0x0810, 0x000000cf);
- nv_mthd(dev, 0xa097, 0x0850, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0890, 0x00000000);
- nv_mthd(dev, 0xa097, 0x08d0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0910, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0950, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0990, 0x00000000);
- nv_mthd(dev, 0xa097, 0x09d0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0814, 0x00000040);
- nv_mthd(dev, 0xa097, 0x0854, 0x00000040);
- nv_mthd(dev, 0xa097, 0x0894, 0x00000040);
- nv_mthd(dev, 0xa097, 0x08d4, 0x00000040);
- nv_mthd(dev, 0xa097, 0x0914, 0x00000040);
- nv_mthd(dev, 0xa097, 0x0954, 0x00000040);
- nv_mthd(dev, 0xa097, 0x0994, 0x00000040);
- nv_mthd(dev, 0xa097, 0x09d4, 0x00000040);
- nv_mthd(dev, 0xa097, 0x0818, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0858, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0898, 0x00000001);
- nv_mthd(dev, 0xa097, 0x08d8, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0918, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0958, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0998, 0x00000001);
- nv_mthd(dev, 0xa097, 0x09d8, 0x00000001);
- nv_mthd(dev, 0xa097, 0x081c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x085c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x089c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x08dc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x091c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x095c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x099c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x09dc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0820, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0860, 0x00000000);
- nv_mthd(dev, 0xa097, 0x08a0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x08e0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0920, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0960, 0x00000000);
- nv_mthd(dev, 0xa097, 0x09a0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x09e0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c00, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c10, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c20, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c30, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c40, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c50, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c60, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c70, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c80, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c90, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ca0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cb0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cc0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cd0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ce0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cf0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c04, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c14, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c24, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c34, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c44, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c54, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c64, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c74, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c84, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c94, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ca4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cb4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cc4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cd4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ce4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cf4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c08, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c18, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c28, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c38, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c48, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c58, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c68, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c78, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c88, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c98, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ca8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cb8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cc8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cd8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ce8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cf8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c0c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c1c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c2c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c3c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c4c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c5c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c6c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c7c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c8c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1c9c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cbc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ccc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cdc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1cfc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d00, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d10, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d20, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d30, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d40, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d50, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d60, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d70, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d80, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d90, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1da0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1db0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dc0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dd0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1de0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1df0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d04, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d14, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d24, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d34, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d44, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d54, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d64, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d74, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d84, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d94, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1da4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1db4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dc4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dd4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1de4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1df4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d08, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d18, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d28, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d38, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d48, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d58, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d68, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d78, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d88, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d98, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1da8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1db8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dc8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dd8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1de8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1df8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d0c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d1c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d2c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d3c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d4c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d5c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d6c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d7c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d8c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1d9c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dbc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dcc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ddc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1dfc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f00, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f08, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f10, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f18, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f20, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f28, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f30, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f38, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f40, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f48, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f50, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f58, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f60, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f68, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f70, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f78, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f04, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f0c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f14, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f1c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f24, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f2c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f34, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f3c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f44, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f4c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f54, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f5c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f64, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f6c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f74, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f7c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f80, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f88, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f90, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f98, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fa0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fa8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fb0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fb8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fc0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fc8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fd0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fd8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fe0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fe8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ff0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ff8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f84, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f8c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f94, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1f9c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fa4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fb4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fbc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fc4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fcc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fd4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fdc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fe4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1fec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ff4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1ffc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2000, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2040, 0x00000011);
- nv_mthd(dev, 0xa097, 0x2080, 0x00000020);
- nv_mthd(dev, 0xa097, 0x20c0, 0x00000030);
- nv_mthd(dev, 0xa097, 0x2100, 0x00000040);
- nv_mthd(dev, 0xa097, 0x2140, 0x00000051);
- nv_mthd(dev, 0xa097, 0x200c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x204c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x208c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x20cc, 0x00000001);
- nv_mthd(dev, 0xa097, 0x210c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x214c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x2010, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2050, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2090, 0x00000001);
- nv_mthd(dev, 0xa097, 0x20d0, 0x00000002);
- nv_mthd(dev, 0xa097, 0x2110, 0x00000003);
- nv_mthd(dev, 0xa097, 0x2150, 0x00000004);
- nv_mthd(dev, 0xa097, 0x0380, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03a0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03e0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0384, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03a4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03c4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03e4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0388, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03a8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03c8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03e8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x038c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03ac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03cc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x03ec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0700, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0710, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0720, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0730, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0704, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0714, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0724, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0734, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0708, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0718, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0728, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0738, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2800, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2804, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2808, 0x00000000);
- nv_mthd(dev, 0xa097, 0x280c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2810, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2814, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2818, 0x00000000);
- nv_mthd(dev, 0xa097, 0x281c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2820, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2824, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2828, 0x00000000);
- nv_mthd(dev, 0xa097, 0x282c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2830, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2834, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2838, 0x00000000);
- nv_mthd(dev, 0xa097, 0x283c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2840, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2844, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2848, 0x00000000);
- nv_mthd(dev, 0xa097, 0x284c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2850, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2854, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2858, 0x00000000);
- nv_mthd(dev, 0xa097, 0x285c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2860, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2864, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2868, 0x00000000);
- nv_mthd(dev, 0xa097, 0x286c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2870, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2874, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2878, 0x00000000);
- nv_mthd(dev, 0xa097, 0x287c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2880, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2884, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2888, 0x00000000);
- nv_mthd(dev, 0xa097, 0x288c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2890, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2894, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2898, 0x00000000);
- nv_mthd(dev, 0xa097, 0x289c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28a0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28a4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28a8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28ac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28b0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28b4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28b8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28bc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28c4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28c8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28cc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28d0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28d4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28d8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28dc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28e0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28e4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28e8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28ec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28f0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28f4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28f8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x28fc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2900, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2904, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2908, 0x00000000);
- nv_mthd(dev, 0xa097, 0x290c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2910, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2914, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2918, 0x00000000);
- nv_mthd(dev, 0xa097, 0x291c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2920, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2924, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2928, 0x00000000);
- nv_mthd(dev, 0xa097, 0x292c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2930, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2934, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2938, 0x00000000);
- nv_mthd(dev, 0xa097, 0x293c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2940, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2944, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2948, 0x00000000);
- nv_mthd(dev, 0xa097, 0x294c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2950, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2954, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2958, 0x00000000);
- nv_mthd(dev, 0xa097, 0x295c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2960, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2964, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2968, 0x00000000);
- nv_mthd(dev, 0xa097, 0x296c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2970, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2974, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2978, 0x00000000);
- nv_mthd(dev, 0xa097, 0x297c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2980, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2984, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2988, 0x00000000);
- nv_mthd(dev, 0xa097, 0x298c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2990, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2994, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2998, 0x00000000);
- nv_mthd(dev, 0xa097, 0x299c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29a0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29a4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29a8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29ac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29b0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29b4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29b8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29bc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29c4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29c8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29cc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29d0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29d4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29d8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29dc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29e0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29e4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29e8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29ec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29f0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29f4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29f8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x29fc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a00, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a20, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a40, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a60, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a80, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0aa0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ac0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ae0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b00, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b20, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b40, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b60, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b80, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ba0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bc0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0be0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a04, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a24, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a44, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a64, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a84, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0aa4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ac4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ae4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b04, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b24, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b44, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b64, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b84, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ba4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bc4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0be4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a08, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a28, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a48, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a68, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a88, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0aa8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ac8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ae8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b08, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b28, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b48, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b68, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b88, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ba8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bc8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0be8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a0c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a2c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a4c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a6c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a8c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0aac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0acc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0aec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b0c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b2c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b4c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b6c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b8c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bcc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a10, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a30, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a50, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a70, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a90, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ab0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ad0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0af0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b10, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b30, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b50, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b70, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b90, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bb0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bd0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bf0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a14, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a34, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a54, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a74, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0a94, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ab4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ad4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0af4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b14, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b34, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b54, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b74, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0b94, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bb4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bd4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0bf4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c00, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c10, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c20, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c30, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c40, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c50, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c60, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c70, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c80, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c90, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ca0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cb0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cc0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cd0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ce0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cf0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c04, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c14, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c24, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c34, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c44, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c54, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c64, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c74, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c84, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c94, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ca4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cb4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cc4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cd4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ce4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cf4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c08, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c18, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c28, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c38, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c48, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c58, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c68, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c78, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c88, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c98, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ca8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cb8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cc8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cd8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ce8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0cf8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0c0c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0c1c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0c2c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0c3c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0c4c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0c5c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0c6c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0c7c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0c8c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0c9c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0cac, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0cbc, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0ccc, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0cdc, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0cec, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0cfc, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0d00, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d08, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d10, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d18, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d20, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d28, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d30, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d38, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d04, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d0c, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d14, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d1c, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d24, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d2c, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d34, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d3c, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e00, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e10, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e20, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e30, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e40, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e50, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e60, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e70, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e80, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e90, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ea0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0eb0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ec0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ed0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ee0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ef0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0e04, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e14, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e24, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e34, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e44, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e54, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e64, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e74, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e84, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e94, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ea4, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0eb4, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ec4, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ed4, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ee4, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ef4, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e08, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e18, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e28, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e38, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e48, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e58, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e68, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e78, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e88, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0e98, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ea8, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0eb8, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ec8, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ed8, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ee8, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0ef8, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d40, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d48, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d50, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d58, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d44, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d4c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d54, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d5c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1e00, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e20, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e40, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e60, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e80, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ea0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ec0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ee0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e04, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e24, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e44, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e64, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e84, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ea4, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ec4, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ee4, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e08, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e28, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e48, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e68, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e88, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1ea8, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1ec8, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1ee8, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e0c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e2c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e4c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e6c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e8c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1eac, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ecc, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1eec, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e10, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e30, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e50, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e70, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e90, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1eb0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ed0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ef0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e14, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e34, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e54, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e74, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e94, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1eb4, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1ed4, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1ef4, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1e18, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e38, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e58, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e78, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1e98, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1eb8, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ed8, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1ef8, 0x00000001);
- nv_mthd(dev, 0xa097, 0x3400, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3404, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3408, 0x00000000);
- nv_mthd(dev, 0xa097, 0x340c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3410, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3414, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3418, 0x00000000);
- nv_mthd(dev, 0xa097, 0x341c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3420, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3424, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3428, 0x00000000);
- nv_mthd(dev, 0xa097, 0x342c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3430, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3434, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3438, 0x00000000);
- nv_mthd(dev, 0xa097, 0x343c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3440, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3444, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3448, 0x00000000);
- nv_mthd(dev, 0xa097, 0x344c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3450, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3454, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3458, 0x00000000);
- nv_mthd(dev, 0xa097, 0x345c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3460, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3464, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3468, 0x00000000);
- nv_mthd(dev, 0xa097, 0x346c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3470, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3474, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3478, 0x00000000);
- nv_mthd(dev, 0xa097, 0x347c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3480, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3484, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3488, 0x00000000);
- nv_mthd(dev, 0xa097, 0x348c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3490, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3494, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3498, 0x00000000);
- nv_mthd(dev, 0xa097, 0x349c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34a0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34a4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34a8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34ac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34b0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34b4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34b8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34bc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34c4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34c8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34cc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34d0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34d4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34d8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34dc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34e0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34e4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34e8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34ec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34f0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34f4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34f8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x34fc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3500, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3504, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3508, 0x00000000);
- nv_mthd(dev, 0xa097, 0x350c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3510, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3514, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3518, 0x00000000);
- nv_mthd(dev, 0xa097, 0x351c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3520, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3524, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3528, 0x00000000);
- nv_mthd(dev, 0xa097, 0x352c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3530, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3534, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3538, 0x00000000);
- nv_mthd(dev, 0xa097, 0x353c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3540, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3544, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3548, 0x00000000);
- nv_mthd(dev, 0xa097, 0x354c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3550, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3554, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3558, 0x00000000);
- nv_mthd(dev, 0xa097, 0x355c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3560, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3564, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3568, 0x00000000);
- nv_mthd(dev, 0xa097, 0x356c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3570, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3574, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3578, 0x00000000);
- nv_mthd(dev, 0xa097, 0x357c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3580, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3584, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3588, 0x00000000);
- nv_mthd(dev, 0xa097, 0x358c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3590, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3594, 0x00000000);
- nv_mthd(dev, 0xa097, 0x3598, 0x00000000);
- nv_mthd(dev, 0xa097, 0x359c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35a0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35a4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35a8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35ac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35b0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35b4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35b8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35bc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35c4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35c8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35cc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35d0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35d4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35d8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35dc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35e0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35e4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35e8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35ec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35f0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35f4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35f8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x35fc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x030c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1944, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1514, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d68, 0x0000ffff);
- nv_mthd(dev, 0xa097, 0x121c, 0x0fac6881);
- nv_mthd(dev, 0xa097, 0x0fac, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1538, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0fe0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0fe4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0fe8, 0x00000014);
- nv_mthd(dev, 0xa097, 0x0fec, 0x00000040);
- nv_mthd(dev, 0xa097, 0x0ff0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x179c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1228, 0x00000400);
- nv_mthd(dev, 0xa097, 0x122c, 0x00000300);
- nv_mthd(dev, 0xa097, 0x1230, 0x00010001);
- nv_mthd(dev, 0xa097, 0x07f8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x15b4, 0x00000001);
- nv_mthd(dev, 0xa097, 0x15cc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1534, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0fb0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x15d0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x153c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x16b4, 0x00000003);
- nv_mthd(dev, 0xa097, 0x0fbc, 0x0000ffff);
- nv_mthd(dev, 0xa097, 0x0fc0, 0x0000ffff);
- nv_mthd(dev, 0xa097, 0x0fc4, 0x0000ffff);
- nv_mthd(dev, 0xa097, 0x0fc8, 0x0000ffff);
- nv_mthd(dev, 0xa097, 0x0df8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0dfc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1948, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1970, 0x00000001);
- nv_mthd(dev, 0xa097, 0x161c, 0x000009f0);
- nv_mthd(dev, 0xa097, 0x0dcc, 0x00000010);
- nv_mthd(dev, 0xa097, 0x163c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x15e4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1160, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1164, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1168, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x116c, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1170, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1174, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1178, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x117c, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1180, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1184, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1188, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x118c, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1190, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1194, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1198, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x119c, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11a0, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11a4, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11a8, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11ac, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11b0, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11b4, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11b8, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11bc, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11c0, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11c4, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11c8, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11cc, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11d0, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11d4, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11d8, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x11dc, 0x25e00040);
- nv_mthd(dev, 0xa097, 0x1880, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1884, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1888, 0x00000000);
- nv_mthd(dev, 0xa097, 0x188c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1890, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1894, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1898, 0x00000000);
- nv_mthd(dev, 0xa097, 0x189c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18a0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18a4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18a8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18ac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18b0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18b4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18b8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18bc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18c4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18c8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18cc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18d0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18d4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18d8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18dc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18e0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18e4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18e8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18ec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18f0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18f4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18f8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x18fc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0f84, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0f88, 0x00000000);
- nv_mthd(dev, 0xa097, 0x17c8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x17cc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x17d0, 0x000000ff);
- nv_mthd(dev, 0xa097, 0x17d4, 0xffffffff);
- nv_mthd(dev, 0xa097, 0x17d8, 0x00000002);
- nv_mthd(dev, 0xa097, 0x17dc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x15f4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x15f8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1434, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1438, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d74, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0dec, 0x00000001);
- nv_mthd(dev, 0xa097, 0x13a4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1318, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1644, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0748, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0de8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1648, 0x00000000);
- nv_mthd(dev, 0xa097, 0x12a4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1120, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1124, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1128, 0x00000000);
- nv_mthd(dev, 0xa097, 0x112c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1118, 0x00000000);
- nv_mthd(dev, 0xa097, 0x164c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1658, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1910, 0x00000290);
- nv_mthd(dev, 0xa097, 0x1518, 0x00000000);
- nv_mthd(dev, 0xa097, 0x165c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1520, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1604, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1570, 0x00000000);
- nv_mthd(dev, 0xa097, 0x13b0, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x13b4, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x020c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1670, 0x30201000);
- nv_mthd(dev, 0xa097, 0x1674, 0x70605040);
- nv_mthd(dev, 0xa097, 0x1678, 0xb8a89888);
- nv_mthd(dev, 0xa097, 0x167c, 0xf8e8d8c8);
- nv_mthd(dev, 0xa097, 0x166c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1680, 0x00ffff00);
- nv_mthd(dev, 0xa097, 0x12d0, 0x00000003);
- nv_mthd(dev, 0xa097, 0x12d4, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1684, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1688, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0dac, 0x00001b02);
- nv_mthd(dev, 0xa097, 0x0db0, 0x00001b02);
- nv_mthd(dev, 0xa097, 0x0db4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x168c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x15bc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x156c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x187c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1110, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0dc0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0dc4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0dc8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1234, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1690, 0x00000000);
- nv_mthd(dev, 0xa097, 0x12ac, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0790, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0794, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0798, 0x00000000);
- nv_mthd(dev, 0xa097, 0x079c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x07a0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x077c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1000, 0x00000010);
- nv_mthd(dev, 0xa097, 0x10fc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1290, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0218, 0x00000010);
- nv_mthd(dev, 0xa097, 0x12d8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x12dc, 0x00000010);
- nv_mthd(dev, 0xa097, 0x0d94, 0x00000001);
- nv_mthd(dev, 0xa097, 0x155c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1560, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1564, 0x00000fff);
- nv_mthd(dev, 0xa097, 0x1574, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1578, 0x00000000);
- nv_mthd(dev, 0xa097, 0x157c, 0x000fffff);
- nv_mthd(dev, 0xa097, 0x1354, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1610, 0x00000012);
- nv_mthd(dev, 0xa097, 0x1608, 0x00000000);
- nv_mthd(dev, 0xa097, 0x160c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x260c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x07ac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x162c, 0x00000003);
- nv_mthd(dev, 0xa097, 0x0210, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0320, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0324, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0328, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x032c, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0330, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0334, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0338, 0x3f800000);
- nv_mthd(dev, 0xa097, 0x0750, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0760, 0x39291909);
- nv_mthd(dev, 0xa097, 0x0764, 0x79695949);
- nv_mthd(dev, 0xa097, 0x0768, 0xb9a99989);
- nv_mthd(dev, 0xa097, 0x076c, 0xf9e9d9c9);
- nv_mthd(dev, 0xa097, 0x0770, 0x30201000);
- nv_mthd(dev, 0xa097, 0x0774, 0x70605040);
- nv_mthd(dev, 0xa097, 0x0778, 0x00009080);
- nv_mthd(dev, 0xa097, 0x0780, 0x39291909);
- nv_mthd(dev, 0xa097, 0x0784, 0x79695949);
- nv_mthd(dev, 0xa097, 0x0788, 0xb9a99989);
- nv_mthd(dev, 0xa097, 0x078c, 0xf9e9d9c9);
- nv_mthd(dev, 0xa097, 0x07d0, 0x30201000);
- nv_mthd(dev, 0xa097, 0x07d4, 0x70605040);
- nv_mthd(dev, 0xa097, 0x07d8, 0x00009080);
- nv_mthd(dev, 0xa097, 0x037c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0740, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0744, 0x00000000);
- nv_mthd(dev, 0xa097, 0x2600, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1918, 0x00000000);
- nv_mthd(dev, 0xa097, 0x191c, 0x00000900);
- nv_mthd(dev, 0xa097, 0x1920, 0x00000405);
- nv_mthd(dev, 0xa097, 0x1308, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1924, 0x00000000);
- nv_mthd(dev, 0xa097, 0x13ac, 0x00000000);
- nv_mthd(dev, 0xa097, 0x192c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x193c, 0x00002c1c);
- nv_mthd(dev, 0xa097, 0x0d7c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0f8c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x02c0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1510, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1940, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ff4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0ff8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x194c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1950, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1968, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1590, 0x0000003f);
- nv_mthd(dev, 0xa097, 0x07e8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x07ec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x07f0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x07f4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x196c, 0x00000011);
- nv_mthd(dev, 0xa097, 0x02e4, 0x0000b001);
- nv_mthd(dev, 0xa097, 0x036c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0370, 0x00000000);
- nv_mthd(dev, 0xa097, 0x197c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0fcc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0fd0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x02d8, 0x00000040);
- nv_mthd(dev, 0xa097, 0x1980, 0x00000080);
- nv_mthd(dev, 0xa097, 0x1504, 0x00000080);
- nv_mthd(dev, 0xa097, 0x1984, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0300, 0x00000001);
- nv_mthd(dev, 0xa097, 0x13a8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x12ec, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1310, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1314, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1380, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1384, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1388, 0x00000001);
- nv_mthd(dev, 0xa097, 0x138c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1390, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1394, 0x00000000);
- nv_mthd(dev, 0xa097, 0x139c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1398, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1594, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1598, 0x00000001);
- nv_mthd(dev, 0xa097, 0x159c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x15a0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x15a4, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0f54, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0f58, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0f5c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x19bc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0f9c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0fa0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x12cc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x12e8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x130c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1360, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1364, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1368, 0x00000000);
- nv_mthd(dev, 0xa097, 0x136c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1370, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1374, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1378, 0x00000000);
- nv_mthd(dev, 0xa097, 0x137c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x133c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1340, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1344, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1348, 0x00000001);
- nv_mthd(dev, 0xa097, 0x134c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1350, 0x00000002);
- nv_mthd(dev, 0xa097, 0x1358, 0x00000001);
- nv_mthd(dev, 0xa097, 0x12e4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x131c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1320, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1324, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1328, 0x00000000);
- nv_mthd(dev, 0xa097, 0x19c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1140, 0x00000000);
- nv_mthd(dev, 0xa097, 0x19c4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x19c8, 0x00001500);
- nv_mthd(dev, 0xa097, 0x135c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0f90, 0x00000000);
- nv_mthd(dev, 0xa097, 0x19e0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x19e4, 0x00000001);
- nv_mthd(dev, 0xa097, 0x19e8, 0x00000001);
- nv_mthd(dev, 0xa097, 0x19ec, 0x00000001);
- nv_mthd(dev, 0xa097, 0x19f0, 0x00000001);
- nv_mthd(dev, 0xa097, 0x19f4, 0x00000001);
- nv_mthd(dev, 0xa097, 0x19f8, 0x00000001);
- nv_mthd(dev, 0xa097, 0x19fc, 0x00000001);
- nv_mthd(dev, 0xa097, 0x19cc, 0x00000001);
- nv_mthd(dev, 0xa097, 0x15b8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1a00, 0x00001111);
- nv_mthd(dev, 0xa097, 0x1a04, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1a08, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1a0c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1a10, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1a14, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1a18, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1a1c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d6c, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x0d70, 0xffff0000);
- nv_mthd(dev, 0xa097, 0x10f8, 0x00001010);
- nv_mthd(dev, 0xa097, 0x0d80, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d84, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d88, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d8c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0d90, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0da0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x07a4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x07a8, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1508, 0x80000000);
- nv_mthd(dev, 0xa097, 0x150c, 0x40000000);
- nv_mthd(dev, 0xa097, 0x1668, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0318, 0x00000008);
- nv_mthd(dev, 0xa097, 0x031c, 0x00000008);
- nv_mthd(dev, 0xa097, 0x0d9c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x0374, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0378, 0x00000020);
- nv_mthd(dev, 0xa097, 0x07dc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x074c, 0x00000055);
- nv_mthd(dev, 0xa097, 0x1420, 0x00000003);
- nv_mthd(dev, 0xa097, 0x17bc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x17c0, 0x00000000);
- nv_mthd(dev, 0xa097, 0x17c4, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1008, 0x00000008);
- nv_mthd(dev, 0xa097, 0x100c, 0x00000040);
- nv_mthd(dev, 0xa097, 0x1010, 0x0000012c);
- nv_mthd(dev, 0xa097, 0x0d60, 0x00000040);
- nv_mthd(dev, 0xa097, 0x075c, 0x00000003);
- nv_mthd(dev, 0xa097, 0x1018, 0x00000020);
- nv_mthd(dev, 0xa097, 0x101c, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1020, 0x00000020);
- nv_mthd(dev, 0xa097, 0x1024, 0x00000001);
- nv_mthd(dev, 0xa097, 0x1444, 0x00000000);
- nv_mthd(dev, 0xa097, 0x1448, 0x00000000);
- nv_mthd(dev, 0xa097, 0x144c, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0360, 0x20164010);
- nv_mthd(dev, 0xa097, 0x0364, 0x00000020);
- nv_mthd(dev, 0xa097, 0x0368, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0de4, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0204, 0x00000006);
- nv_mthd(dev, 0xa097, 0x0208, 0x00000000);
- nv_mthd(dev, 0xa097, 0x02cc, 0x003fffff);
- nv_mthd(dev, 0xa097, 0x02d0, 0x003fffff);
- nv_mthd(dev, 0xa097, 0x1220, 0x00000005);
- nv_mthd(dev, 0xa097, 0x0fdc, 0x00000000);
- nv_mthd(dev, 0xa097, 0x0f98, 0x00400008);
- nv_mthd(dev, 0xa097, 0x1284, 0x08000080);
- nv_mthd(dev, 0xa097, 0x1450, 0x00400008);
- nv_mthd(dev, 0xa097, 0x1454, 0x08000080);
- nv_mthd(dev, 0xa097, 0x0214, 0x00000000);
-}
-
-static void
-nve0_grctx_generate_902d(struct drm_device *dev)
-{
- nv_mthd(dev, 0x902d, 0x0200, 0x000000cf);
- nv_mthd(dev, 0x902d, 0x0204, 0x00000001);
- nv_mthd(dev, 0x902d, 0x0208, 0x00000020);
- nv_mthd(dev, 0x902d, 0x020c, 0x00000001);
- nv_mthd(dev, 0x902d, 0x0210, 0x00000000);
- nv_mthd(dev, 0x902d, 0x0214, 0x00000080);
- nv_mthd(dev, 0x902d, 0x0218, 0x00000100);
- nv_mthd(dev, 0x902d, 0x021c, 0x00000100);
- nv_mthd(dev, 0x902d, 0x0220, 0x00000000);
- nv_mthd(dev, 0x902d, 0x0224, 0x00000000);
- nv_mthd(dev, 0x902d, 0x0230, 0x000000cf);
- nv_mthd(dev, 0x902d, 0x0234, 0x00000001);
- nv_mthd(dev, 0x902d, 0x0238, 0x00000020);
- nv_mthd(dev, 0x902d, 0x023c, 0x00000001);
- nv_mthd(dev, 0x902d, 0x0244, 0x00000080);
- nv_mthd(dev, 0x902d, 0x0248, 0x00000100);
- nv_mthd(dev, 0x902d, 0x024c, 0x00000100);
- nv_mthd(dev, 0x902d, 0x3410, 0x00000000);
-}
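
The class-method dumps above (and the register dumps that follow) are flat sequences of identical nv_mthd()/nv_wr32() calls. Purely as an illustration of the same nv_mthd() usage, a table-driven variant is sketched below; the struct and array names are hypothetical and the table is truncated, only the method/data pairs are taken from the removed code.

```c
/* Hypothetical table-driven form of the 0x902d method dump above.
 * mthd_init and init_902d are illustrative names; nv_mthd() is the
 * helper already used throughout this file. */
struct mthd_init {
	u16 mthd;
	u32 data;
};

static const struct mthd_init init_902d[] = {
	{ 0x0200, 0x000000cf },
	{ 0x0204, 0x00000001 },
	{ 0x0208, 0x00000020 },
	/* ... remaining methods elided ... */
	{ 0x3410, 0x00000000 },
};

static void
nve0_grctx_generate_902d_table(struct drm_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(init_902d); i++)
		nv_mthd(dev, 0x902d, init_902d[i].mthd, init_902d[i].data);
}
```
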
-
-static void
-nve0_graph_generate_unk40xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x404010, 0x0);
- nv_wr32(dev, 0x404014, 0x0);
- nv_wr32(dev, 0x404018, 0x0);
- nv_wr32(dev, 0x40401c, 0x0);
- nv_wr32(dev, 0x404020, 0x0);
- nv_wr32(dev, 0x404024, 0xe000);
- nv_wr32(dev, 0x404028, 0x0);
- nv_wr32(dev, 0x4040a8, 0x0);
- nv_wr32(dev, 0x4040ac, 0x0);
- nv_wr32(dev, 0x4040b0, 0x0);
- nv_wr32(dev, 0x4040b4, 0x0);
- nv_wr32(dev, 0x4040b8, 0x0);
- nv_wr32(dev, 0x4040bc, 0x0);
- nv_wr32(dev, 0x4040c0, 0x0);
- nv_wr32(dev, 0x4040c4, 0x0);
- nv_wr32(dev, 0x4040c8, 0xf800008f);
- nv_wr32(dev, 0x4040d0, 0x0);
- nv_wr32(dev, 0x4040d4, 0x0);
- nv_wr32(dev, 0x4040d8, 0x0);
- nv_wr32(dev, 0x4040dc, 0x0);
- nv_wr32(dev, 0x4040e0, 0x0);
- nv_wr32(dev, 0x4040e4, 0x0);
- nv_wr32(dev, 0x4040e8, 0x1000);
- nv_wr32(dev, 0x4040f8, 0x0);
- nv_wr32(dev, 0x404130, 0x0);
- nv_wr32(dev, 0x404134, 0x0);
- nv_wr32(dev, 0x404138, 0x20000040);
- nv_wr32(dev, 0x404150, 0x2e);
- nv_wr32(dev, 0x404154, 0x400);
- nv_wr32(dev, 0x404158, 0x200);
- nv_wr32(dev, 0x404164, 0x55);
- nv_wr32(dev, 0x4041a0, 0x0);
- nv_wr32(dev, 0x4041a4, 0x0);
- nv_wr32(dev, 0x4041a8, 0x0);
- nv_wr32(dev, 0x4041ac, 0x0);
- nv_wr32(dev, 0x404200, 0x0);
- nv_wr32(dev, 0x404204, 0x0);
- nv_wr32(dev, 0x404208, 0x0);
- nv_wr32(dev, 0x40420c, 0x0);
-}
-
-static void
-nve0_graph_generate_unk44xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x404404, 0x0);
- nv_wr32(dev, 0x404408, 0x0);
- nv_wr32(dev, 0x40440c, 0x0);
- nv_wr32(dev, 0x404410, 0x0);
- nv_wr32(dev, 0x404414, 0x0);
- nv_wr32(dev, 0x404418, 0x0);
- nv_wr32(dev, 0x40441c, 0x0);
- nv_wr32(dev, 0x404420, 0x0);
- nv_wr32(dev, 0x404424, 0x0);
- nv_wr32(dev, 0x404428, 0x0);
- nv_wr32(dev, 0x40442c, 0x0);
- nv_wr32(dev, 0x404430, 0x0);
- nv_wr32(dev, 0x404434, 0x0);
- nv_wr32(dev, 0x404438, 0x0);
- nv_wr32(dev, 0x404460, 0x0);
- nv_wr32(dev, 0x404464, 0x0);
- nv_wr32(dev, 0x404468, 0xffffff);
- nv_wr32(dev, 0x40446c, 0x0);
- nv_wr32(dev, 0x404480, 0x1);
- nv_wr32(dev, 0x404498, 0x1);
-}
-
-static void
-nve0_graph_generate_unk46xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x404604, 0x14);
- nv_wr32(dev, 0x404608, 0x0);
- nv_wr32(dev, 0x40460c, 0x3fff);
- nv_wr32(dev, 0x404610, 0x100);
- nv_wr32(dev, 0x404618, 0x0);
- nv_wr32(dev, 0x40461c, 0x0);
- nv_wr32(dev, 0x404620, 0x0);
- nv_wr32(dev, 0x404624, 0x0);
- nv_wr32(dev, 0x40462c, 0x0);
- nv_wr32(dev, 0x404630, 0x0);
- nv_wr32(dev, 0x404640, 0x0);
- nv_wr32(dev, 0x404654, 0x0);
- nv_wr32(dev, 0x404660, 0x0);
- nv_wr32(dev, 0x404678, 0x0);
- nv_wr32(dev, 0x40467c, 0x2);
- nv_wr32(dev, 0x404680, 0x0);
- nv_wr32(dev, 0x404684, 0x0);
- nv_wr32(dev, 0x404688, 0x0);
- nv_wr32(dev, 0x40468c, 0x0);
- nv_wr32(dev, 0x404690, 0x0);
- nv_wr32(dev, 0x404694, 0x0);
- nv_wr32(dev, 0x404698, 0x0);
- nv_wr32(dev, 0x40469c, 0x0);
- nv_wr32(dev, 0x4046a0, 0x7f0080);
- nv_wr32(dev, 0x4046a4, 0x0);
- nv_wr32(dev, 0x4046a8, 0x0);
- nv_wr32(dev, 0x4046ac, 0x0);
- nv_wr32(dev, 0x4046b0, 0x0);
- nv_wr32(dev, 0x4046b4, 0x0);
- nv_wr32(dev, 0x4046b8, 0x0);
- nv_wr32(dev, 0x4046bc, 0x0);
- nv_wr32(dev, 0x4046c0, 0x0);
- nv_wr32(dev, 0x4046c8, 0x0);
- nv_wr32(dev, 0x4046cc, 0x0);
- nv_wr32(dev, 0x4046d0, 0x0);
-}
-
-static void
-nve0_graph_generate_unk47xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x404700, 0x0);
- nv_wr32(dev, 0x404704, 0x0);
- nv_wr32(dev, 0x404708, 0x0);
- nv_wr32(dev, 0x404718, 0x0);
- nv_wr32(dev, 0x40471c, 0x0);
- nv_wr32(dev, 0x404720, 0x0);
- nv_wr32(dev, 0x404724, 0x0);
- nv_wr32(dev, 0x404728, 0x0);
- nv_wr32(dev, 0x40472c, 0x0);
- nv_wr32(dev, 0x404730, 0x0);
- nv_wr32(dev, 0x404734, 0x100);
- nv_wr32(dev, 0x404738, 0x0);
- nv_wr32(dev, 0x40473c, 0x0);
- nv_wr32(dev, 0x404744, 0x0);
- nv_wr32(dev, 0x404748, 0x0);
- nv_wr32(dev, 0x404754, 0x0);
-}
-
-static void
-nve0_graph_generate_unk58xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x405800, 0xf8000bf);
- nv_wr32(dev, 0x405830, 0x2180648);
- nv_wr32(dev, 0x405834, 0x8000000);
- nv_wr32(dev, 0x405838, 0x0);
- nv_wr32(dev, 0x405854, 0x0);
- nv_wr32(dev, 0x405870, 0x1);
- nv_wr32(dev, 0x405874, 0x1);
- nv_wr32(dev, 0x405878, 0x1);
- nv_wr32(dev, 0x40587c, 0x1);
- nv_wr32(dev, 0x405a00, 0x0);
- nv_wr32(dev, 0x405a04, 0x0);
- nv_wr32(dev, 0x405a18, 0x0);
- nv_wr32(dev, 0x405b00, 0x0);
- nv_wr32(dev, 0x405b10, 0x1000);
-}
-
-static void
-nve0_graph_generate_unk60xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x406020, 0x4103c1);
- nv_wr32(dev, 0x406028, 0x1);
- nv_wr32(dev, 0x40602c, 0x1);
- nv_wr32(dev, 0x406030, 0x1);
- nv_wr32(dev, 0x406034, 0x1);
-}
-
-static void
-nve0_graph_generate_unk64xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x4064a8, 0x0);
- nv_wr32(dev, 0x4064ac, 0x3fff);
- nv_wr32(dev, 0x4064b4, 0x0);
- nv_wr32(dev, 0x4064b8, 0x0);
- nv_wr32(dev, 0x4064c0, 0x801a00f0);
- nv_wr32(dev, 0x4064c4, 0x192ffff);
- nv_wr32(dev, 0x4064c8, 0x1800600);
- nv_wr32(dev, 0x4064cc, 0x0);
- nv_wr32(dev, 0x4064d0, 0x0);
- nv_wr32(dev, 0x4064d4, 0x0);
- nv_wr32(dev, 0x4064d8, 0x0);
- nv_wr32(dev, 0x4064dc, 0x0);
- nv_wr32(dev, 0x4064e0, 0x0);
- nv_wr32(dev, 0x4064e4, 0x0);
- nv_wr32(dev, 0x4064e8, 0x0);
- nv_wr32(dev, 0x4064ec, 0x0);
- nv_wr32(dev, 0x4064fc, 0x22a);
-}
-
-static void
-nve0_graph_generate_unk70xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x407040, 0x0);
-}
-
-static void
-nve0_graph_generate_unk78xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x407804, 0x23);
- nv_wr32(dev, 0x40780c, 0xa418820);
- nv_wr32(dev, 0x407810, 0x62080e6);
- nv_wr32(dev, 0x407814, 0x20398a4);
- nv_wr32(dev, 0x407818, 0xe629062);
- nv_wr32(dev, 0x40781c, 0xa418820);
- nv_wr32(dev, 0x407820, 0xe6);
- nv_wr32(dev, 0x4078bc, 0x103);
-}
-
-static void
-nve0_graph_generate_unk80xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x408000, 0x0);
- nv_wr32(dev, 0x408004, 0x0);
- nv_wr32(dev, 0x408008, 0x30);
- nv_wr32(dev, 0x40800c, 0x0);
- nv_wr32(dev, 0x408010, 0x0);
- nv_wr32(dev, 0x408014, 0x69);
- nv_wr32(dev, 0x408018, 0xe100e100);
- nv_wr32(dev, 0x408064, 0x0);
-}
-
-static void
-nve0_graph_generate_unk88xx(struct drm_device *dev)
-{
- nv_wr32(dev, 0x408800, 0x2802a3c);
- nv_wr32(dev, 0x408804, 0x40);
- nv_wr32(dev, 0x408808, 0x1043e005);
- nv_wr32(dev, 0x408840, 0xb);
- nv_wr32(dev, 0x408900, 0x3080b801);
- nv_wr32(dev, 0x408904, 0x62000001);
- nv_wr32(dev, 0x408908, 0xc8102f);
- nv_wr32(dev, 0x408980, 0x11d);
-}
-
-static void
-nve0_graph_generate_gpc(struct drm_device *dev)
-{
- nv_wr32(dev, 0x418380, 0x16);
- nv_wr32(dev, 0x418400, 0x38004e00);
- nv_wr32(dev, 0x418404, 0x71e0ffff);
- nv_wr32(dev, 0x41840c, 0x1008);
- nv_wr32(dev, 0x418410, 0xfff0fff);
- nv_wr32(dev, 0x418414, 0x2200fff);
- nv_wr32(dev, 0x418450, 0x0);
- nv_wr32(dev, 0x418454, 0x0);
- nv_wr32(dev, 0x418458, 0x0);
- nv_wr32(dev, 0x41845c, 0x0);
- nv_wr32(dev, 0x418460, 0x0);
- nv_wr32(dev, 0x418464, 0x0);
- nv_wr32(dev, 0x418468, 0x1);
- nv_wr32(dev, 0x41846c, 0x0);
- nv_wr32(dev, 0x418470, 0x0);
- nv_wr32(dev, 0x418600, 0x1f);
- nv_wr32(dev, 0x418684, 0xf);
- nv_wr32(dev, 0x418700, 0x2);
- nv_wr32(dev, 0x418704, 0x80);
- nv_wr32(dev, 0x418708, 0x0);
- nv_wr32(dev, 0x41870c, 0x0);
- nv_wr32(dev, 0x418710, 0x0);
- nv_wr32(dev, 0x418800, 0x7006860a);
- nv_wr32(dev, 0x418808, 0x0);
- nv_wr32(dev, 0x41880c, 0x0);
- nv_wr32(dev, 0x418810, 0x0);
- nv_wr32(dev, 0x418828, 0x44);
- nv_wr32(dev, 0x418830, 0x10000001);
- nv_wr32(dev, 0x4188d8, 0x8);
- nv_wr32(dev, 0x4188e0, 0x1000000);
- nv_wr32(dev, 0x4188e8, 0x0);
- nv_wr32(dev, 0x4188ec, 0x0);
- nv_wr32(dev, 0x4188f0, 0x0);
- nv_wr32(dev, 0x4188f4, 0x0);
- nv_wr32(dev, 0x4188f8, 0x0);
- nv_wr32(dev, 0x4188fc, 0x20100018);
- nv_wr32(dev, 0x41891c, 0xff00ff);
- nv_wr32(dev, 0x418924, 0x0);
- nv_wr32(dev, 0x418928, 0xffff00);
- nv_wr32(dev, 0x41892c, 0xff00);
- nv_wr32(dev, 0x418a00, 0x0);
- nv_wr32(dev, 0x418a04, 0x0);
- nv_wr32(dev, 0x418a08, 0x0);
- nv_wr32(dev, 0x418a0c, 0x10000);
- nv_wr32(dev, 0x418a10, 0x0);
- nv_wr32(dev, 0x418a14, 0x0);
- nv_wr32(dev, 0x418a18, 0x0);
- nv_wr32(dev, 0x418a20, 0x0);
- nv_wr32(dev, 0x418a24, 0x0);
- nv_wr32(dev, 0x418a28, 0x0);
- nv_wr32(dev, 0x418a2c, 0x10000);
- nv_wr32(dev, 0x418a30, 0x0);
- nv_wr32(dev, 0x418a34, 0x0);
- nv_wr32(dev, 0x418a38, 0x0);
- nv_wr32(dev, 0x418a40, 0x0);
- nv_wr32(dev, 0x418a44, 0x0);
- nv_wr32(dev, 0x418a48, 0x0);
- nv_wr32(dev, 0x418a4c, 0x10000);
- nv_wr32(dev, 0x418a50, 0x0);
- nv_wr32(dev, 0x418a54, 0x0);
- nv_wr32(dev, 0x418a58, 0x0);
- nv_wr32(dev, 0x418a60, 0x0);
- nv_wr32(dev, 0x418a64, 0x0);
- nv_wr32(dev, 0x418a68, 0x0);
- nv_wr32(dev, 0x418a6c, 0x10000);
- nv_wr32(dev, 0x418a70, 0x0);
- nv_wr32(dev, 0x418a74, 0x0);
- nv_wr32(dev, 0x418a78, 0x0);
- nv_wr32(dev, 0x418a80, 0x0);
- nv_wr32(dev, 0x418a84, 0x0);
- nv_wr32(dev, 0x418a88, 0x0);
- nv_wr32(dev, 0x418a8c, 0x10000);
- nv_wr32(dev, 0x418a90, 0x0);
- nv_wr32(dev, 0x418a94, 0x0);
- nv_wr32(dev, 0x418a98, 0x0);
- nv_wr32(dev, 0x418aa0, 0x0);
- nv_wr32(dev, 0x418aa4, 0x0);
- nv_wr32(dev, 0x418aa8, 0x0);
- nv_wr32(dev, 0x418aac, 0x10000);
- nv_wr32(dev, 0x418ab0, 0x0);
- nv_wr32(dev, 0x418ab4, 0x0);
- nv_wr32(dev, 0x418ab8, 0x0);
- nv_wr32(dev, 0x418ac0, 0x0);
- nv_wr32(dev, 0x418ac4, 0x0);
- nv_wr32(dev, 0x418ac8, 0x0);
- nv_wr32(dev, 0x418acc, 0x10000);
- nv_wr32(dev, 0x418ad0, 0x0);
- nv_wr32(dev, 0x418ad4, 0x0);
- nv_wr32(dev, 0x418ad8, 0x0);
- nv_wr32(dev, 0x418ae0, 0x0);
- nv_wr32(dev, 0x418ae4, 0x0);
- nv_wr32(dev, 0x418ae8, 0x0);
- nv_wr32(dev, 0x418aec, 0x10000);
- nv_wr32(dev, 0x418af0, 0x0);
- nv_wr32(dev, 0x418af4, 0x0);
- nv_wr32(dev, 0x418af8, 0x0);
- nv_wr32(dev, 0x418b00, 0x6);
- nv_wr32(dev, 0x418b08, 0xa418820);
- nv_wr32(dev, 0x418b0c, 0x62080e6);
- nv_wr32(dev, 0x418b10, 0x20398a4);
- nv_wr32(dev, 0x418b14, 0xe629062);
- nv_wr32(dev, 0x418b18, 0xa418820);
- nv_wr32(dev, 0x418b1c, 0xe6);
- nv_wr32(dev, 0x418bb8, 0x103);
- nv_wr32(dev, 0x418c08, 0x1);
- nv_wr32(dev, 0x418c10, 0x0);
- nv_wr32(dev, 0x418c14, 0x0);
- nv_wr32(dev, 0x418c18, 0x0);
- nv_wr32(dev, 0x418c1c, 0x0);
- nv_wr32(dev, 0x418c20, 0x0);
- nv_wr32(dev, 0x418c24, 0x0);
- nv_wr32(dev, 0x418c28, 0x0);
- nv_wr32(dev, 0x418c2c, 0x0);
- nv_wr32(dev, 0x418c40, 0xffffffff);
- nv_wr32(dev, 0x418c6c, 0x1);
- nv_wr32(dev, 0x418c80, 0x20200004);
- nv_wr32(dev, 0x418c8c, 0x1);
- nv_wr32(dev, 0x419000, 0x780);
- nv_wr32(dev, 0x419004, 0x0);
- nv_wr32(dev, 0x419008, 0x0);
- nv_wr32(dev, 0x419014, 0x4);
-}
-
-static void
-nve0_graph_generate_tpc(struct drm_device *dev)
-{
- nv_wr32(dev, 0x419848, 0x0);
- nv_wr32(dev, 0x419864, 0x129);
- nv_wr32(dev, 0x419888, 0x0);
- nv_wr32(dev, 0x419a00, 0xf0);
- nv_wr32(dev, 0x419a04, 0x1);
- nv_wr32(dev, 0x419a08, 0x21);
- nv_wr32(dev, 0x419a0c, 0x20000);
- nv_wr32(dev, 0x419a10, 0x0);
- nv_wr32(dev, 0x419a14, 0x200);
- nv_wr32(dev, 0x419a1c, 0xc000);
- nv_wr32(dev, 0x419a20, 0x800);
- nv_wr32(dev, 0x419a30, 0x1);
- nv_wr32(dev, 0x419ac4, 0x37f440);
- nv_wr32(dev, 0x419c00, 0xa);
- nv_wr32(dev, 0x419c04, 0x80000006);
- nv_wr32(dev, 0x419c08, 0x2);
- nv_wr32(dev, 0x419c20, 0x0);
- nv_wr32(dev, 0x419c24, 0x84210);
- nv_wr32(dev, 0x419c28, 0x3efbefbe);
- nv_wr32(dev, 0x419ce8, 0x0);
- nv_wr32(dev, 0x419cf4, 0x3203);
- nv_wr32(dev, 0x419e04, 0x0);
- nv_wr32(dev, 0x419e08, 0x0);
- nv_wr32(dev, 0x419e0c, 0x0);
- nv_wr32(dev, 0x419e10, 0x402);
- nv_wr32(dev, 0x419e44, 0x13eff2);
- nv_wr32(dev, 0x419e48, 0x0);
- nv_wr32(dev, 0x419e4c, 0x7f);
- nv_wr32(dev, 0x419e50, 0x0);
- nv_wr32(dev, 0x419e54, 0x0);
- nv_wr32(dev, 0x419e58, 0x0);
- nv_wr32(dev, 0x419e5c, 0x0);
- nv_wr32(dev, 0x419e60, 0x0);
- nv_wr32(dev, 0x419e64, 0x0);
- nv_wr32(dev, 0x419e68, 0x0);
- nv_wr32(dev, 0x419e6c, 0x0);
- nv_wr32(dev, 0x419e70, 0x0);
- nv_wr32(dev, 0x419e74, 0x0);
- nv_wr32(dev, 0x419e78, 0x0);
- nv_wr32(dev, 0x419e7c, 0x0);
- nv_wr32(dev, 0x419e80, 0x0);
- nv_wr32(dev, 0x419e84, 0x0);
- nv_wr32(dev, 0x419e88, 0x0);
- nv_wr32(dev, 0x419e8c, 0x0);
- nv_wr32(dev, 0x419e90, 0x0);
- nv_wr32(dev, 0x419e94, 0x0);
- nv_wr32(dev, 0x419e98, 0x0);
- nv_wr32(dev, 0x419eac, 0x1fcf);
- nv_wr32(dev, 0x419eb0, 0xd3f);
- nv_wr32(dev, 0x419ec8, 0x1304f);
- nv_wr32(dev, 0x419f30, 0x0);
- nv_wr32(dev, 0x419f34, 0x0);
- nv_wr32(dev, 0x419f38, 0x0);
- nv_wr32(dev, 0x419f3c, 0x0);
- nv_wr32(dev, 0x419f40, 0x0);
- nv_wr32(dev, 0x419f44, 0x0);
- nv_wr32(dev, 0x419f48, 0x0);
- nv_wr32(dev, 0x419f4c, 0x0);
- nv_wr32(dev, 0x419f58, 0x0);
- nv_wr32(dev, 0x419f78, 0xb);
-}
-
-static void
-nve0_graph_generate_tpcunk(struct drm_device *dev)
-{
- nv_wr32(dev, 0x41be24, 0x6);
- nv_wr32(dev, 0x41bec0, 0x12180000);
- nv_wr32(dev, 0x41bec4, 0x37f7f);
- nv_wr32(dev, 0x41bee4, 0x6480430);
- nv_wr32(dev, 0x41bf00, 0xa418820);
- nv_wr32(dev, 0x41bf04, 0x62080e6);
- nv_wr32(dev, 0x41bf08, 0x20398a4);
- nv_wr32(dev, 0x41bf0c, 0xe629062);
- nv_wr32(dev, 0x41bf10, 0xa418820);
- nv_wr32(dev, 0x41bf14, 0xe6);
- nv_wr32(dev, 0x41bfd0, 0x900103);
- nv_wr32(dev, 0x41bfe0, 0x400001);
- nv_wr32(dev, 0x41bfe4, 0x0);
-}
-
-int
-nve0_grctx_generate(struct nouveau_channel *chan)
-{
- struct nve0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
- struct nve0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
- struct drm_device *dev = chan->dev;
- u32 data[6] = {}, data2[2] = {}, tmp;
- u32 tpc_set = 0, tpc_mask = 0;
- u8 tpcnr[GPC_MAX], a, b;
- u8 shift, ntpcv;
- int i, gpc, tpc, id;
-
- nv_mask(dev, 0x000260, 0x00000001, 0x00000000);
- nv_wr32(dev, 0x400204, 0x00000000);
- nv_wr32(dev, 0x400208, 0x00000000);
-
- nve0_graph_generate_unk40xx(dev);
- nve0_graph_generate_unk44xx(dev);
- nve0_graph_generate_unk46xx(dev);
- nve0_graph_generate_unk47xx(dev);
- nve0_graph_generate_unk58xx(dev);
- nve0_graph_generate_unk60xx(dev);
- nve0_graph_generate_unk64xx(dev);
- nve0_graph_generate_unk70xx(dev);
- nve0_graph_generate_unk78xx(dev);
- nve0_graph_generate_unk80xx(dev);
- nve0_graph_generate_unk88xx(dev);
- nve0_graph_generate_gpc(dev);
- nve0_graph_generate_tpc(dev);
- nve0_graph_generate_tpcunk(dev);
-
- nv_wr32(dev, 0x404154, 0x0);
-
- for (i = 0; i < grch->mmio_nr * 8; i += 8) {
- u32 reg = nv_ro32(grch->mmio, i + 0);
- u32 val = nv_ro32(grch->mmio, i + 4);
- nv_wr32(dev, reg, val);
- }
-
- nv_wr32(dev, 0x418c6c, 0x1);
- nv_wr32(dev, 0x41980c, 0x10);
- nv_wr32(dev, 0x41be08, 0x4);
- nv_wr32(dev, 0x4064c0, 0x801a00f0);
- nv_wr32(dev, 0x405800, 0xf8000bf);
- nv_wr32(dev, 0x419c00, 0xa);
-
- for (tpc = 0, id = 0; tpc < 4; tpc++) {
- for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
- if (tpc < priv->tpc_nr[gpc]) {
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0698), id);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x04e8), id);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), id);
- nv_wr32(dev, TPC_UNIT(gpc, tpc, 0x0088), id++);
- }
-
- nv_wr32(dev, GPC_UNIT(gpc, 0x0c08), priv->tpc_nr[gpc]);
- nv_wr32(dev, GPC_UNIT(gpc, 0x0c8c), priv->tpc_nr[gpc]);
- }
- }
-
- tmp = 0;
- for (i = 0; i < priv->gpc_nr; i++)
- tmp |= priv->tpc_nr[i] << (i * 4);
- nv_wr32(dev, 0x406028, tmp);
- nv_wr32(dev, 0x405870, tmp);
-
- nv_wr32(dev, 0x40602c, 0x0);
- nv_wr32(dev, 0x405874, 0x0);
- nv_wr32(dev, 0x406030, 0x0);
- nv_wr32(dev, 0x405878, 0x0);
- nv_wr32(dev, 0x406034, 0x0);
- nv_wr32(dev, 0x40587c, 0x0);
-
- /* calculate first set of magics */
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
-
- gpc = -1;
- for (tpc = 0; tpc < priv->tpc_total; tpc++) {
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpcnr[gpc]);
- tpcnr[gpc]--;
-
- data[tpc / 6] |= gpc << ((tpc % 6) * 5);
- }
-
- for (; tpc < 32; tpc++)
- data[tpc / 6] |= 7 << ((tpc % 6) * 5);
-
- /* and the second... */
- shift = 0;
- ntpcv = priv->tpc_total;
- while (!(ntpcv & (1 << 4))) {
- ntpcv <<= 1;
- shift++;
- }
-
- data2[0] = ntpcv << 16;
- data2[0] |= shift << 21;
- data2[0] |= (((1 << (0 + 5)) % ntpcv) << 24);
- data2[0] |= priv->tpc_total << 8;
- data2[0] |= priv->magic_not_rop_nr;
- for (i = 1; i < 7; i++)
- data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);
-
-	/* and write it all to the various parts of PGRAPH */
- nv_wr32(dev, 0x418bb8, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
- for (i = 0; i < 6; i++)
- nv_wr32(dev, 0x418b08 + (i * 4), data[i]);
-
- nv_wr32(dev, 0x41bfd0, data2[0]);
- nv_wr32(dev, 0x41bfe4, data2[1]);
- for (i = 0; i < 6; i++)
- nv_wr32(dev, 0x41bf00 + (i * 4), data[i]);
-
- nv_wr32(dev, 0x4078bc, (priv->tpc_total << 8) | priv->magic_not_rop_nr);
- for (i = 0; i < 6; i++)
- nv_wr32(dev, 0x40780c + (i * 4), data[i]);
-
-
- memcpy(tpcnr, priv->tpc_nr, sizeof(priv->tpc_nr));
- for (gpc = 0; gpc < priv->gpc_nr; gpc++)
- tpc_mask |= ((1 << priv->tpc_nr[gpc]) - 1) << (gpc * 8);
-
- for (i = 0, gpc = -1, b = -1; i < 32; i++) {
- a = (i * (priv->tpc_total - 1)) / 32;
- if (a != b) {
- b = a;
- do {
- gpc = (gpc + 1) % priv->gpc_nr;
- } while (!tpcnr[gpc]);
- tpc = priv->tpc_nr[gpc] - tpcnr[gpc]--;
-
- tpc_set |= 1 << ((gpc * 8) + tpc);
- }
-
- nv_wr32(dev, 0x406800 + (i * 0x20), tpc_set);
- nv_wr32(dev, 0x406c00 + (i * 0x20), tpc_set ^ tpc_mask);
- }
-
- for (i = 0; i < 8; i++)
- nv_wr32(dev, 0x4064d0 + (i * 0x04), 0x00000000);
-
- nv_wr32(dev, 0x405b00, 0x201);
- nv_wr32(dev, 0x408850, 0x2);
- nv_wr32(dev, 0x408958, 0x2);
- nv_wr32(dev, 0x419f78, 0xa);
-
- nve0_grctx_generate_icmd(dev);
- nve0_grctx_generate_a097(dev);
- nve0_grctx_generate_902d(dev);
-
- nv_mask(dev, 0x000260, 0x00000001, 0x00000001);
- nv_wr32(dev, 0x418800, 0x7026860a); //XXX
- nv_wr32(dev, 0x41be10, 0x00bb8bc7); //XXX
- return 0;
-}
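
The "magic" computation at the end of the removed nve0_grctx_generate() is dense, so here is a standalone sketch that reproduces its two distribution words outside the driver. The 4-GPC / 2-TPC-per-GPC layout is an assumption for illustration only, and the magic_not_rop_nr term is omitted; only the bit-packing logic mirrors the removed code.

```c
/* Standalone sketch (not part of the patch) of the TPC distribution
 * words derived in the removed nve0_grctx_generate(). */
#include <stdio.h>
#include <string.h>

#define GPC_NR 4                        /* assumed GPC count */

int main(void)
{
	unsigned char tpc_nr[GPC_NR] = { 2, 2, 2, 2 };  /* assumed TPCs per GPC */
	unsigned char tpcnr[GPC_NR];
	unsigned int data[6] = { 0 }, data2[2] = { 0 };
	int tpc_total = 8, gpc = -1, tpc, i, shift = 0, ntpcv;

	/* round-robin each TPC onto a GPC, packing the GPC index into
	 * consecutive 5-bit fields, six fields per 32-bit word */
	memcpy(tpcnr, tpc_nr, sizeof(tpc_nr));
	for (tpc = 0; tpc < tpc_total; tpc++) {
		do { gpc = (gpc + 1) % GPC_NR; } while (!tpcnr[gpc]);
		tpcnr[gpc]--;
		data[tpc / 6] |= gpc << ((tpc % 6) * 5);
	}
	for (; tpc < 32; tpc++)
		data[tpc / 6] |= 7 << ((tpc % 6) * 5);  /* 7 marks unused slots */

	/* normalise the TPC count until bit 4 is set, remembering the shift,
	 * then store modular residues of successive powers of two */
	ntpcv = tpc_total;
	while (!(ntpcv & (1 << 4))) {
		ntpcv <<= 1;
		shift++;
	}
	data2[0] = (ntpcv << 16) | (shift << 21) |
		   (((1 << 5) % ntpcv) << 24) | (tpc_total << 8);
	for (i = 1; i < 7; i++)
		data2[1] |= ((1 << (i + 5)) % ntpcv) << ((i - 1) * 5);

	for (i = 0; i < 6; i++)
		printf("data[%d]  = 0x%08x\n", i, data[i]);
	printf("data2[0] = 0x%08x\ndata2[1] = 0x%08x\n", data2[0], data2[1]);
	return 0;
}
```

In the driver these words are broadcast to 0x418b08/0x41bf00/0x40780c and 0x41bfd0/0x41bfe4 as shown above; the sketch only prints them so the packing can be inspected.
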