Diffstat (limited to 'trunk/2.6.22/20015_xen3-auto-common.patch1')
-rw-r--r--  trunk/2.6.22/20015_xen3-auto-common.patch1 | 2101
1 files changed, 2101 insertions, 0 deletions
diff --git a/trunk/2.6.22/20015_xen3-auto-common.patch1 b/trunk/2.6.22/20015_xen3-auto-common.patch1
new file mode 100644
index 0000000..94a9b99
--- /dev/null
+++ b/trunk/2.6.22/20015_xen3-auto-common.patch1
@@ -0,0 +1,2101 @@
+Subject: xen3 common
+From: http://xenbits.xensource.com/xen-3.1-testing.hg (tip 15042)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+Index: head-2007-09-25/drivers/char/mem.c
+===================================================================
+--- head-2007-09-25.orig/drivers/char/mem.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/char/mem.c 2007-09-25 14:35:02.000000000 +0200
+@@ -101,6 +101,7 @@ static inline int valid_mmap_phys_addr_r
+ }
+ #endif
+
++#ifndef ARCH_HAS_DEV_MEM
+ /*
+ * This funcion reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+@@ -223,6 +224,7 @@ static ssize_t write_mem(struct file * f
+ *ppos += written;
+ return written;
+ }
++#endif
+
+ #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
+ static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+@@ -809,6 +811,7 @@ static int open_port(struct inode * inod
+ #define open_kmem open_mem
+ #define open_oldmem open_mem
+
++#ifndef ARCH_HAS_DEV_MEM
+ static const struct file_operations mem_fops = {
+ .llseek = memory_lseek,
+ .read = read_mem,
+@@ -817,6 +820,9 @@ static const struct file_operations mem_
+ .open = open_mem,
+ .get_unmapped_area = get_unmapped_area_mem,
+ };
++#else
++extern const struct file_operations mem_fops;
++#endif
+
+ static const struct file_operations kmem_fops = {
+ .llseek = memory_lseek,
+Index: head-2007-09-25/drivers/char/tpm/Makefile
+===================================================================
+--- head-2007-09-25.orig/drivers/char/tpm/Makefile 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/char/tpm/Makefile 2007-09-25 14:35:02.000000000 +0200
+@@ -9,3 +9,5 @@ obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+ obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+ obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
++obj-$(CONFIG_TCG_XEN) += tpm_xenu.o
++tpm_xenu-y = tpm_xen.o tpm_vtpm.o
+Index: head-2007-09-25/drivers/char/tpm/tpm.h
+===================================================================
+--- head-2007-09-25.orig/drivers/char/tpm/tpm.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/drivers/char/tpm/tpm.h 2007-09-25 14:35:02.000000000 +0200
+@@ -106,6 +106,9 @@ struct tpm_chip {
+ struct dentry **bios_dir;
+
+ struct list_head list;
++#ifdef CONFIG_XEN
++ void *priv;
++#endif
+ };
+
+ #define to_tpm_chip(n) container_of(n, struct tpm_chip, vendor)
+@@ -122,6 +125,18 @@ static inline void tpm_write_index(int b
+ outb(value & 0xFF, base+1);
+ }
+
++#ifdef CONFIG_XEN
++static inline void *chip_get_private(const struct tpm_chip *chip)
++{
++ return chip->priv;
++}
++
++static inline void chip_set_private(struct tpm_chip *chip, void *priv)
++{
++ chip->priv = priv;
++}
++#endif
++
+ extern void tpm_get_timeouts(struct tpm_chip *);
+ extern void tpm_gen_interrupt(struct tpm_chip *);
+ extern void tpm_continue_selftest(struct tpm_chip *);
+Index: head-2007-09-25/drivers/char/tpm/tpm_vtpm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2007-09-25/drivers/char/tpm/tpm_vtpm.c 2007-09-25 14:35:02.000000000 +0200
+@@ -0,0 +1,542 @@
++/*
++ * Copyright (C) 2006 IBM Corporation
++ *
++ * Authors:
++ * Stefan Berger <stefanb@us.ibm.com>
++ *
++ * Generic device driver part for device drivers in a virtualized
++ * environment.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ */
++
++#include <asm/uaccess.h>
++#include <linux/list.h>
++#include <linux/device.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
++
++/* read status bits */
++enum {
++ STATUS_BUSY = 0x01,
++ STATUS_DATA_AVAIL = 0x02,
++ STATUS_READY = 0x04
++};
++
++struct transmission {
++ struct list_head next;
++
++ unsigned char *request;
++ size_t request_len;
++ size_t request_buflen;
++
++ unsigned char *response;
++ size_t response_len;
++ size_t response_buflen;
++
++ unsigned int flags;
++};
++
++enum {
++ TRANSMISSION_FLAG_WAS_QUEUED = 0x1
++};
++
++
++enum {
++ DATAEX_FLAG_QUEUED_ONLY = 0x1
++};
++
++
++/* local variables */
++
++/* local function prototypes */
++static int _vtpm_send_queued(struct tpm_chip *chip);
++
++
++/* =============================================================
++ * Some utility functions
++ * =============================================================
++ */
++static void vtpm_state_init(struct vtpm_state *vtpms)
++{
++ vtpms->current_request = NULL;
++ spin_lock_init(&vtpms->req_list_lock);
++ init_waitqueue_head(&vtpms->req_wait_queue);
++ INIT_LIST_HEAD(&vtpms->queued_requests);
++
++ vtpms->current_response = NULL;
++ spin_lock_init(&vtpms->resp_list_lock);
++ init_waitqueue_head(&vtpms->resp_wait_queue);
++
++ vtpms->disconnect_time = jiffies;
++}
++
++
++static inline struct transmission *transmission_alloc(void)
++{
++ return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
++}
++
++static unsigned char *
++transmission_set_req_buffer(struct transmission *t,
++ unsigned char *buffer, size_t len)
++{
++ if (t->request_buflen < len) {
++ kfree(t->request);
++ t->request = kmalloc(len, GFP_KERNEL);
++ if (!t->request) {
++ t->request_buflen = 0;
++ return NULL;
++ }
++ t->request_buflen = len;
++ }
++
++ memcpy(t->request, buffer, len);
++ t->request_len = len;
++
++ return t->request;
++}
++
++static unsigned char *
++transmission_set_res_buffer(struct transmission *t,
++ const unsigned char *buffer, size_t len)
++{
++ if (t->response_buflen < len) {
++ kfree(t->response);
++ t->response = kmalloc(len, GFP_ATOMIC);
++ if (!t->response) {
++ t->response_buflen = 0;
++ return NULL;
++ }
++ t->response_buflen = len;
++ }
++
++ memcpy(t->response, buffer, len);
++ t->response_len = len;
++
++ return t->response;
++}
++
++static inline void transmission_free(struct transmission *t)
++{
++ kfree(t->request);
++ kfree(t->response);
++ kfree(t);
++}
++
++/* =============================================================
++ * Interface with the lower layer driver
++ * =============================================================
++ */
++/*
++ * Lower layer uses this function to make a response available.
++ */
++int vtpm_vd_recv(const struct tpm_chip *chip,
++ const unsigned char *buffer, size_t count,
++ void *ptr)
++{
++ unsigned long flags;
++ int ret_size = 0;
++ struct transmission *t;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ /*
++ * The list with requests must contain one request
++ * only and the element there must be the one that
++ * was passed to me from the front-end.
++ */
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ if (vtpms->current_request != ptr) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return 0;
++ }
++
++ if ((t = vtpms->current_request)) {
++ transmission_free(t);
++ vtpms->current_request = NULL;
++ }
++
++ t = transmission_alloc();
++ if (t) {
++ if (!transmission_set_res_buffer(t, buffer, count)) {
++ transmission_free(t);
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return -ENOMEM;
++ }
++ ret_size = count;
++ vtpms->current_response = t;
++ wake_up_interruptible(&vtpms->resp_wait_queue);
++ }
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++
++ return ret_size;
++}
++
++
++/*
++ * Lower layer indicates its status (connected/disconnected)
++ */
++void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
++{
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ vtpms->vd_status = vd_status;
++ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++ vtpms->disconnect_time = jiffies;
++ }
++}
++
++/* =============================================================
++ * Interface with the generic TPM driver
++ * =============================================================
++ */
++static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
++{
++ int rc = 0;
++ unsigned long flags;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ /*
++ * Check if the previous operation only queued the command
++ * In this case there won't be a response, so I just
++ * return from here and reset that flag. In any other
++ * case I should receive a response from the back-end.
++ */
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
++ vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ /*
++ * The first few commands (measurements) must be
++ * queued since it might not be possible to talk to the
++ * TPM, yet.
++ * Return a response of up to 30 '0's.
++ */
++
++ count = min_t(size_t, count, 30);
++ memset(buf, 0x0, count);
++ return count;
++ }
++ /*
++ * Check whether something is in the responselist and if
++ * there's nothing in the list wait for something to appear.
++ */
++
++ if (!vtpms->current_response) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
++ 1000);
++ spin_lock_irqsave(&vtpms->resp_list_lock ,flags);
++ }
++
++ if (vtpms->current_response) {
++ struct transmission *t = vtpms->current_response;
++ vtpms->current_response = NULL;
++ rc = min(count, t->response_len);
++ memcpy(buf, t->response, rc);
++ transmission_free(t);
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return rc;
++}
++
++static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
++{
++ int rc = 0;
++ unsigned long flags;
++ struct transmission *t = transmission_alloc();
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ if (!t)
++ return -ENOMEM;
++ /*
++ * If there's a current request, it must be the
++ * previous request that has timed out.
++ */
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ if (vtpms->current_request != NULL) {
++ printk("WARNING: Sending although there is a request outstanding.\n"
++ " Previous request must have timed out.\n");
++ transmission_free(vtpms->current_request);
++ vtpms->current_request = NULL;
++ }
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ /*
++ * Queue the packet if the driver below is not
++ * ready, yet, or there is any packet already
++ * in the queue.
++ * If the driver below is ready, unqueue all
++ * packets first before sending our current
++ * packet.
++ * For each unqueued packet, except for the
++ * last (=current) packet, call the function
++ * tpm_xen_recv to wait for the response to come
++ * back.
++ */
++ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++ if (time_after(jiffies,
++ vtpms->disconnect_time + HZ * 10)) {
++ rc = -ENOENT;
++ } else {
++ goto queue_it;
++ }
++ } else {
++ /*
++ * Send all queued packets.
++ */
++ if (_vtpm_send_queued(chip) == 0) {
++
++ vtpms->current_request = t;
++
++ rc = vtpm_vd_send(vtpms->tpm_private,
++ buf,
++ count,
++ t);
++ /*
++ * The generic TPM driver will call
++ * the function to receive the response.
++ */
++ if (rc < 0) {
++ vtpms->current_request = NULL;
++ goto queue_it;
++ }
++ } else {
++queue_it:
++ if (!transmission_set_req_buffer(t, buf, count)) {
++ transmission_free(t);
++ rc = -ENOMEM;
++ goto exit;
++ }
++ /*
++			 * An error occurred. Don't even try
++ * to send the current request. Just
++ * queue it.
++ */
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
++ list_add_tail(&t->next, &vtpms->queued_requests);
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++ }
++ }
++
++exit:
++ return rc;
++}
++
++
++/*
++ * Send all queued requests.
++ */
++static int _vtpm_send_queued(struct tpm_chip *chip)
++{
++ int rc;
++ int error = 0;
++ long flags;
++ unsigned char buffer[1];
++ struct vtpm_state *vtpms;
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++
++ while (!list_empty(&vtpms->queued_requests)) {
++ /*
++ * Need to dequeue them.
++ * Read the result into a dummy buffer.
++ */
++ struct transmission *qt = (struct transmission *)
++ vtpms->queued_requests.next;
++ list_del(&qt->next);
++ vtpms->current_request = qt;
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ rc = vtpm_vd_send(vtpms->tpm_private,
++ qt->request,
++ qt->request_len,
++ qt);
++
++ if (rc < 0) {
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ if ((qt = vtpms->current_request) != NULL) {
++ /*
++ * requeue it at the beginning
++ * of the list
++ */
++ list_add(&qt->next,
++ &vtpms->queued_requests);
++ }
++ vtpms->current_request = NULL;
++ error = 1;
++ break;
++ }
++ /*
++ * After this point qt is not valid anymore!
++ * It is freed when the front-end is delivering
++ * the data by calling tpm_recv
++ */
++ /*
++ * Receive response into provided dummy buffer
++ */
++ rc = vtpm_recv(chip, buffer, sizeof(buffer));
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ }
++
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ return error;
++}
++
++static void vtpm_cancel(struct tpm_chip *chip)
++{
++ unsigned long flags;
++ struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->resp_list_lock,flags);
++
++ if (!vtpms->current_response && vtpms->current_request) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ interruptible_sleep_on(&vtpms->resp_wait_queue);
++ spin_lock_irqsave(&vtpms->resp_list_lock,flags);
++ }
++
++ if (vtpms->current_response) {
++ struct transmission *t = vtpms->current_response;
++ vtpms->current_response = NULL;
++ transmission_free(t);
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock,flags);
++}
++
++static u8 vtpm_status(struct tpm_chip *chip)
++{
++ u8 rc = 0;
++ unsigned long flags;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ /*
++ * Data are available if:
++ * - there's a current response
++ * - the last packet was queued only (this is fake, but necessary to
++ * get the generic TPM layer to call the receive function.)
++ */
++ if (vtpms->current_response ||
++ 0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
++ rc = STATUS_DATA_AVAIL;
++ } else if (!vtpms->current_response && !vtpms->current_request) {
++ rc = STATUS_READY;
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return rc;
++}
++
++static struct file_operations vtpm_ops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .open = tpm_open,
++ .read = tpm_read,
++ .write = tpm_write,
++ .release = tpm_release,
++};
++
++static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
++static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
++static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
++static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
++static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
++static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
++ NULL);
++static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
++static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
++
++static struct attribute *vtpm_attrs[] = {
++ &dev_attr_pubek.attr,
++ &dev_attr_pcrs.attr,
++ &dev_attr_enabled.attr,
++ &dev_attr_active.attr,
++ &dev_attr_owned.attr,
++ &dev_attr_temp_deactivated.attr,
++ &dev_attr_caps.attr,
++ &dev_attr_cancel.attr,
++ NULL,
++};
++
++static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
++
++#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
++
++static struct tpm_vendor_specific tpm_vtpm = {
++ .recv = vtpm_recv,
++ .send = vtpm_send,
++ .cancel = vtpm_cancel,
++ .status = vtpm_status,
++ .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
++ .req_complete_val = STATUS_DATA_AVAIL,
++ .req_canceled = STATUS_READY,
++ .attr_group = &vtpm_attr_grp,
++ .miscdev = {
++ .fops = &vtpm_ops,
++ },
++ .duration = {
++ TPM_LONG_TIMEOUT,
++ TPM_LONG_TIMEOUT,
++ TPM_LONG_TIMEOUT,
++ },
++};
++
++struct tpm_chip *init_vtpm(struct device *dev,
++ struct tpm_private *tp)
++{
++ long rc;
++ struct tpm_chip *chip;
++ struct vtpm_state *vtpms;
++
++ vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
++ if (!vtpms)
++ return ERR_PTR(-ENOMEM);
++
++ vtpm_state_init(vtpms);
++ vtpms->tpm_private = tp;
++
++ chip = tpm_register_hardware(dev, &tpm_vtpm);
++ if (!chip) {
++ rc = -ENODEV;
++ goto err_free_mem;
++ }
++
++ chip_set_private(chip, vtpms);
++
++ return chip;
++
++err_free_mem:
++ kfree(vtpms);
++
++ return ERR_PTR(rc);
++}
++
++void cleanup_vtpm(struct device *dev)
++{
++ struct tpm_chip *chip = dev_get_drvdata(dev);
++ struct vtpm_state *vtpms = (struct vtpm_state*)chip_get_private(chip);
++ tpm_remove_hardware(dev);
++ kfree(vtpms);
++}
+Index: head-2007-09-25/drivers/char/tpm/tpm_vtpm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2007-09-25/drivers/char/tpm/tpm_vtpm.h 2007-09-25 14:35:02.000000000 +0200
+@@ -0,0 +1,55 @@
++#ifndef TPM_VTPM_H
++#define TPM_VTPM_H
++
++struct tpm_chip;
++struct tpm_private;
++
++struct vtpm_state {
++ struct transmission *current_request;
++ spinlock_t req_list_lock;
++ wait_queue_head_t req_wait_queue;
++
++ struct list_head queued_requests;
++
++ struct transmission *current_response;
++ spinlock_t resp_list_lock;
++ wait_queue_head_t resp_wait_queue; // processes waiting for responses
++
++ u8 vd_status;
++ u8 flags;
++
++ unsigned long disconnect_time;
++
++ /*
++ * The following is a private structure of the underlying
++ * driver. It is passed as parameter in the send function.
++ */
++ struct tpm_private *tpm_private;
++};
++
++
++enum vdev_status {
++ TPM_VD_STATUS_DISCONNECTED = 0x0,
++ TPM_VD_STATUS_CONNECTED = 0x1
++};
++
++/* this function is called from tpm_vtpm.c */
++int vtpm_vd_send(struct tpm_private * tp,
++ const u8 * buf, size_t count, void *ptr);
++
++/* these functions are offered by tpm_vtpm.c */
++struct tpm_chip *init_vtpm(struct device *,
++ struct tpm_private *);
++void cleanup_vtpm(struct device *);
++int vtpm_vd_recv(const struct tpm_chip* chip,
++ const unsigned char *buffer, size_t count, void *ptr);
++void vtpm_vd_status(const struct tpm_chip *, u8 status);
++
++static inline struct tpm_private *tpm_private_from_dev(struct device *dev)
++{
++ struct tpm_chip *chip = dev_get_drvdata(dev);
++ struct vtpm_state *vtpms = chip_get_private(chip);
++ return vtpms->tpm_private;
++}
++
++#endif
+Index: head-2007-09-25/drivers/char/tpm/tpm_xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2007-09-25/drivers/char/tpm/tpm_xen.c 2007-09-25 14:35:02.000000000 +0200
+@@ -0,0 +1,720 @@
++/*
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/errno.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/mutex.h>
++#include <asm/uaccess.h>
++#include <xen/evtchn.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/tpmif.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
++
++#undef DEBUG
++
++/* local structures */
++struct tpm_private {
++ struct tpm_chip *chip;
++
++ tpmif_tx_interface_t *tx;
++ atomic_t refcnt;
++ unsigned int irq;
++ u8 is_connected;
++ u8 is_suspended;
++
++ spinlock_t tx_lock;
++
++ struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
++
++ atomic_t tx_busy;
++ void *tx_remember;
++
++ domid_t backend_id;
++ wait_queue_head_t wait_q;
++
++ struct xenbus_device *dev;
++ int ring_ref;
++};
++
++struct tx_buffer {
++ unsigned int size; // available space in data
++ unsigned int len; // used space in data
++ unsigned char *data; // pointer to a page
++};
++
++
++/* locally visible variables */
++static grant_ref_t gref_head;
++static struct tpm_private *my_priv;
++
++/* local function prototypes */
++static irqreturn_t tpmif_int(int irq,
++ void *tpm_priv,
++ struct pt_regs *ptregs);
++static void tpmif_rx_action(unsigned long unused);
++static int tpmif_connect(struct xenbus_device *dev,
++ struct tpm_private *tp,
++ domid_t domid);
++static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
++static void tpmif_free_tx_buffers(struct tpm_private *tp);
++static void tpmif_set_connected_state(struct tpm_private *tp,
++ u8 newstate);
++static int tpm_xmit(struct tpm_private *tp,
++ const u8 * buf, size_t count, int userbuffer,
++ void *remember);
++static void destroy_tpmring(struct tpm_private *tp);
++void __exit tpmif_exit(void);
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
++
++#define GRANT_INVALID_REF 0
++
++
++static inline int
++tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
++ int isuserbuffer)
++{
++ int copied = len;
++
++ if (len > txb->size)
++ copied = txb->size;
++ if (isuserbuffer) {
++ if (copy_from_user(txb->data, src, copied))
++ return -EFAULT;
++ } else {
++ memcpy(txb->data, src, copied);
++ }
++ txb->len = len;
++ return copied;
++}
++
++static inline struct tx_buffer *tx_buffer_alloc(void)
++{
++ struct tx_buffer *txb;
++
++ txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
++ if (!txb)
++ return NULL;
++
++ txb->len = 0;
++ txb->size = PAGE_SIZE;
++ txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
++ if (txb->data == NULL) {
++ kfree(txb);
++ txb = NULL;
++ }
++
++ return txb;
++}
++
++
++static inline void tx_buffer_free(struct tx_buffer *txb)
++{
++ if (txb) {
++ free_page((long)txb->data);
++ kfree(txb);
++ }
++}
++
++/**************************************************************
++ Utility function for the tpm_private structure
++**************************************************************/
++static void tpm_private_init(struct tpm_private *tp)
++{
++ spin_lock_init(&tp->tx_lock);
++ init_waitqueue_head(&tp->wait_q);
++ atomic_set(&tp->refcnt, 1);
++}
++
++static void tpm_private_put(void)
++{
++ if (!atomic_dec_and_test(&my_priv->refcnt))
++ return;
++
++ tpmif_free_tx_buffers(my_priv);
++ kfree(my_priv);
++ my_priv = NULL;
++}
++
++static struct tpm_private *tpm_private_get(void)
++{
++ int err;
++
++ if (my_priv) {
++ atomic_inc(&my_priv->refcnt);
++ return my_priv;
++ }
++
++ my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
++ if (!my_priv)
++ return NULL;
++
++ tpm_private_init(my_priv);
++ err = tpmif_allocate_tx_buffers(my_priv);
++ if (err < 0)
++ tpm_private_put();
++
++ return my_priv;
++}
++
++/**************************************************************
++
++ The interface to let the tpm plugin register its callback
++ function and send data to another partition using this module
++
++**************************************************************/
++
++static DEFINE_MUTEX(suspend_lock);
++/*
++ * Send data via this module by calling this function
++ */
++int vtpm_vd_send(struct tpm_private *tp,
++ const u8 * buf, size_t count, void *ptr)
++{
++ int sent;
++
++ mutex_lock(&suspend_lock);
++ sent = tpm_xmit(tp, buf, count, 0, ptr);
++ mutex_unlock(&suspend_lock);
++
++ return sent;
++}
++
++/**************************************************************
++ XENBUS support code
++**************************************************************/
++
++static int setup_tpmring(struct xenbus_device *dev,
++ struct tpm_private *tp)
++{
++ tpmif_tx_interface_t *sring;
++ int err;
++
++ tp->ring_ref = GRANT_INVALID_REF;
++
++ sring = (void *)__get_free_page(GFP_KERNEL);
++ if (!sring) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++ return -ENOMEM;
++ }
++ tp->tx = sring;
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
++ if (err < 0) {
++ free_page((unsigned long)sring);
++ tp->tx = NULL;
++ xenbus_dev_fatal(dev, err, "allocating grant reference");
++ goto fail;
++ }
++ tp->ring_ref = err;
++
++ err = tpmif_connect(dev, tp, dev->otherend_id);
++ if (err)
++ goto fail;
++
++ return 0;
++fail:
++ destroy_tpmring(tp);
++ return err;
++}
++
++
++static void destroy_tpmring(struct tpm_private *tp)
++{
++ tpmif_set_connected_state(tp, 0);
++
++ if (tp->ring_ref != GRANT_INVALID_REF) {
++ gnttab_end_foreign_access(tp->ring_ref, 0,
++ (unsigned long)tp->tx);
++ tp->ring_ref = GRANT_INVALID_REF;
++ tp->tx = NULL;
++ }
++
++ if (tp->irq)
++ unbind_from_irqhandler(tp->irq, tp);
++
++ tp->irq = 0;
++}
++
++
++static int talk_to_backend(struct xenbus_device *dev,
++ struct tpm_private *tp)
++{
++ const char *message = NULL;
++ int err;
++ struct xenbus_transaction xbt;
++
++ err = setup_tpmring(dev, tp);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "setting up ring");
++ goto out;
++ }
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_tpmring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename,
++ "ring-ref","%u", tp->ring_ref);
++ if (err) {
++ message = "writing ring-ref";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(tp->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err) {
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_tpmring;
++ }
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ return 0;
++
++abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ if (message)
++ xenbus_dev_error(dev, err, "%s", message);
++destroy_tpmring:
++ destroy_tpmring(tp);
++out:
++ return err;
++}
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ DPRINTK("\n");
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ case XenbusStateUnknown:
++ break;
++
++ case XenbusStateConnected:
++ tpmif_set_connected_state(tp, 1);
++ break;
++
++ case XenbusStateClosing:
++ tpmif_set_connected_state(tp, 0);
++ xenbus_frontend_closed(dev);
++ break;
++
++ case XenbusStateClosed:
++ tpmif_set_connected_state(tp, 0);
++ if (tp->is_suspended == 0)
++ device_unregister(&dev->dev);
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++static int tpmfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ int handle;
++ struct tpm_private *tp = tpm_private_get();
++
++ if (!tp)
++ return -ENOMEM;
++
++ tp->chip = init_vtpm(&dev->dev, tp);
++ if (IS_ERR(tp->chip))
++ return PTR_ERR(tp->chip);
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename,
++ "handle", "%i", &handle);
++ if (XENBUS_EXIST_ERR(err))
++ return err;
++
++ if (err < 0) {
++ xenbus_dev_fatal(dev,err,"reading virtual-device");
++ return err;
++ }
++
++ tp->dev = dev;
++
++ err = talk_to_backend(dev, tp);
++ if (err) {
++ tpm_private_put();
++ return err;
++ }
++
++ return 0;
++}
++
++
++static int tpmfront_remove(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ destroy_tpmring(tp);
++ cleanup_vtpm(&dev->dev);
++ return 0;
++}
++
++static int tpmfront_suspend(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ u32 ctr;
++
++ /* Take the lock, preventing any application from sending. */
++ mutex_lock(&suspend_lock);
++ tp->is_suspended = 1;
++
++ for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
++ if ((ctr % 10) == 0)
++ printk("TPM-FE [INFO]: Waiting for outstanding "
++ "request.\n");
++ /* Wait for a request to be responded to. */
++ interruptible_sleep_on_timeout(&tp->wait_q, 100);
++ }
++
++ return 0;
++}
++
++static int tpmfront_suspend_finish(struct tpm_private *tp)
++{
++ tp->is_suspended = 0;
++ /* Allow applications to send again. */
++ mutex_unlock(&suspend_lock);
++ return 0;
++}
++
++static int tpmfront_suspend_cancel(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ return tpmfront_suspend_finish(tp);
++}
++
++static int tpmfront_resume(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ destroy_tpmring(tp);
++ return talk_to_backend(dev, tp);
++}
++
++static int tpmif_connect(struct xenbus_device *dev,
++ struct tpm_private *tp,
++ domid_t domid)
++{
++ int err;
++
++ tp->backend_id = domid;
++
++ err = bind_listening_port_to_irqhandler(
++ domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
++ if (err <= 0) {
++ WPRINTK("bind_listening_port_to_irqhandler failed "
++ "(err=%d)\n", err);
++ return err;
++ }
++ tp->irq = err;
++
++ return 0;
++}
++
++static struct xenbus_device_id tpmfront_ids[] = {
++ { "vtpm" },
++ { "" }
++};
++
++static struct xenbus_driver tpmfront = {
++ .name = "vtpm",
++ .owner = THIS_MODULE,
++ .ids = tpmfront_ids,
++ .probe = tpmfront_probe,
++ .remove = tpmfront_remove,
++ .resume = tpmfront_resume,
++ .otherend_changed = backend_changed,
++ .suspend = tpmfront_suspend,
++ .suspend_cancel = tpmfront_suspend_cancel,
++};
++
++static void __init init_tpm_xenbus(void)
++{
++ xenbus_register_frontend(&tpmfront);
++}
++
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
++{
++ unsigned int i;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
++ tp->tx_buffers[i] = tx_buffer_alloc();
++ if (!tp->tx_buffers[i]) {
++ tpmif_free_tx_buffers(tp);
++ return -ENOMEM;
++ }
++ }
++ return 0;
++}
++
++static void tpmif_free_tx_buffers(struct tpm_private *tp)
++{
++ unsigned int i;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
++ tx_buffer_free(tp->tx_buffers[i]);
++}
++
++static void tpmif_rx_action(unsigned long priv)
++{
++ struct tpm_private *tp = (struct tpm_private *)priv;
++ int i = 0;
++ unsigned int received;
++ unsigned int offset = 0;
++ u8 *buffer;
++ tpmif_tx_request_t *tx = &tp->tx->ring[i].req;
++
++ atomic_set(&tp->tx_busy, 0);
++ wake_up_interruptible(&tp->wait_q);
++
++ received = tx->size;
++
++ buffer = kmalloc(received, GFP_ATOMIC);
++ if (!buffer)
++ return;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
++ struct tx_buffer *txb = tp->tx_buffers[i];
++ tpmif_tx_request_t *tx;
++ unsigned int tocopy;
++
++ tx = &tp->tx->ring[i].req;
++ tocopy = tx->size;
++ if (tocopy > PAGE_SIZE)
++ tocopy = PAGE_SIZE;
++
++ memcpy(&buffer[offset], txb->data, tocopy);
++
++ gnttab_release_grant_reference(&gref_head, tx->ref);
++
++ offset += tocopy;
++ }
++
++ vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
++ kfree(buffer);
++}
++
++
++static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
++{
++ struct tpm_private *tp = tpm_priv;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tp->tx_lock, flags);
++ tpmif_rx_tasklet.data = (unsigned long)tp;
++ tasklet_schedule(&tpmif_rx_tasklet);
++ spin_unlock_irqrestore(&tp->tx_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++
++static int tpm_xmit(struct tpm_private *tp,
++ const u8 * buf, size_t count, int isuserbuffer,
++ void *remember)
++{
++ tpmif_tx_request_t *tx;
++ TPMIF_RING_IDX i;
++ unsigned int offset = 0;
++
++ spin_lock_irq(&tp->tx_lock);
++
++ if (unlikely(atomic_read(&tp->tx_busy))) {
++ printk("tpm_xmit: There's an outstanding request/response "
++ "on the way!\n");
++ spin_unlock_irq(&tp->tx_lock);
++ return -EBUSY;
++ }
++
++ if (tp->is_connected != 1) {
++ spin_unlock_irq(&tp->tx_lock);
++ return -EIO;
++ }
++
++ for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
++ struct tx_buffer *txb = tp->tx_buffers[i];
++ int copied;
++
++ if (!txb) {
++			DPRINTK("txb (i=%d) is NULL. buffers initialized?\n"
++ "Not transmitting anything!\n", i);
++ spin_unlock_irq(&tp->tx_lock);
++ return -EFAULT;
++ }
++
++ copied = tx_buffer_copy(txb, &buf[offset], count,
++ isuserbuffer);
++ if (copied < 0) {
++ /* An error occurred */
++ spin_unlock_irq(&tp->tx_lock);
++ return copied;
++ }
++ count -= copied;
++ offset += copied;
++
++ tx = &tp->tx->ring[i].req;
++ tx->addr = virt_to_machine(txb->data);
++ tx->size = txb->len;
++
++ DPRINTK("First 4 characters sent by TPM-FE are "
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
++
++ /* Get the granttable reference for this page. */
++ tx->ref = gnttab_claim_grant_reference(&gref_head);
++ if (tx->ref == -ENOSPC) {
++ spin_unlock_irq(&tp->tx_lock);
++ DPRINTK("Grant table claim reference failed in "
++ "func:%s line:%d file:%s\n",
++ __FUNCTION__, __LINE__, __FILE__);
++ return -ENOSPC;
++ }
++ gnttab_grant_foreign_access_ref(tx->ref,
++ tp->backend_id,
++ virt_to_mfn(txb->data),
++ 0 /*RW*/);
++ wmb();
++ }
++
++ atomic_set(&tp->tx_busy, 1);
++ tp->tx_remember = remember;
++
++ mb();
++
++ notify_remote_via_irq(tp->irq);
++
++ spin_unlock_irq(&tp->tx_lock);
++ return offset;
++}
++
++
++static void tpmif_notify_upperlayer(struct tpm_private *tp)
++{
++ /* Notify upper layer about the state of the connection to the BE. */
++ vtpm_vd_status(tp->chip, (tp->is_connected
++ ? TPM_VD_STATUS_CONNECTED
++ : TPM_VD_STATUS_DISCONNECTED));
++}
++
++
++static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
++{
++ /*
++ * Don't notify upper layer if we are in suspend mode and
++ * should disconnect - assumption is that we will resume
++ * The mutex keeps apps from sending.
++ */
++ if (is_connected == 0 && tp->is_suspended == 1)
++ return;
++
++ /*
++ * Unlock the mutex if we are connected again
++ * after being suspended - now resuming.
++ * This also removes the suspend state.
++ */
++ if (is_connected == 1 && tp->is_suspended == 1)
++ tpmfront_suspend_finish(tp);
++
++ if (is_connected != tp->is_connected) {
++ tp->is_connected = is_connected;
++ tpmif_notify_upperlayer(tp);
++ }
++}
++
++
++
++/* =================================================================
++ * Initialization function.
++ * =================================================================
++ */
++
++
++static int __init tpmif_init(void)
++{
++ struct tpm_private *tp;
++
++ if (is_initial_xendomain())
++ return -EPERM;
++
++ tp = tpm_private_get();
++ if (!tp)
++ return -ENOMEM;
++
++ IPRINTK("Initialising the vTPM driver.\n");
++ if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
++ &gref_head) < 0) {
++ tpm_private_put();
++ return -EFAULT;
++ }
++
++ init_tpm_xenbus();
++ return 0;
++}
++
++
++module_init(tpmif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2007-09-25/include/linux/elfnote.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/elfnote.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/elfnote.h 2007-09-25 14:35:02.000000000 +0200
+@@ -38,7 +38,7 @@
+ * e.g. ELFNOTE(XYZCo, 42, .asciz, "forty-two")
+ * ELFNOTE(XYZCo, 12, .long, 0xdeadbeef)
+ */
+-#define ELFNOTE(name, type, desctype, descdata) \
++#define ELFNOTE(name, type, desctype, descdata...) \
+ .pushsection .note.name, "",@note ; \
+ .align 4 ; \
+ .long 2f - 1f /* namesz */ ; \
+Index: head-2007-09-25/include/linux/gfp.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/gfp.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/gfp.h 2007-09-25 14:35:02.000000000 +0200
+@@ -115,7 +115,11 @@ static inline enum zone_type gfp_zone(gf
+ */
+
+ #ifndef HAVE_ARCH_FREE_PAGE
+-static inline void arch_free_page(struct page *page, int order) { }
++/*
++ * If arch_free_page returns non-zero then the generic free_page code can
++ * immediately bail: the arch-specific function has done all the work.
++ */
++static inline int arch_free_page(struct page *page, int order) { return 0; }
+ #endif
+ #ifndef HAVE_ARCH_ALLOC_PAGE
+ static inline void arch_alloc_page(struct page *page, int order) { }
+Index: head-2007-09-25/include/linux/interrupt.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/interrupt.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/interrupt.h 2007-09-25 14:35:02.000000000 +0200
+@@ -207,6 +207,12 @@ static inline int disable_irq_wake(unsig
+
+ #endif /* CONFIG_GENERIC_HARDIRQS */
+
++#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
++int irq_ignore_unhandled(unsigned int irq);
++#else
++#define irq_ignore_unhandled(irq) 0
++#endif
++
+ #ifndef __ARCH_SET_SOFTIRQ_PENDING
+ #define set_softirq_pending(x) (local_softirq_pending() = (x))
+ #define or_softirq_pending(x) (local_softirq_pending() |= (x))
+Index: head-2007-09-25/include/linux/kexec.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/kexec.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/kexec.h 2007-09-25 14:35:02.000000000 +0200
+@@ -46,6 +46,13 @@
+ KEXEC_CORE_NOTE_NAME_BYTES + \
+ KEXEC_CORE_NOTE_DESC_BYTES )
+
++#ifndef KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) page_to_pfn(page)
++#define kexec_pfn_to_page(pfn) pfn_to_page(pfn)
++#define kexec_virt_to_phys(addr) virt_to_phys(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(addr)
++#endif
++
+ /*
+ * This structure is used to hold the arguments that are used when loading
+ * kernel binaries.
+@@ -106,6 +113,12 @@ struct kimage {
+ extern NORET_TYPE void machine_kexec(struct kimage *image) ATTRIB_NORET;
+ extern int machine_kexec_prepare(struct kimage *image);
+ extern void machine_kexec_cleanup(struct kimage *image);
++#ifdef CONFIG_XEN
++extern int xen_machine_kexec_load(struct kimage *image);
++extern void xen_machine_kexec_unload(struct kimage *image);
++extern void xen_machine_kexec_setup_resources(void);
++extern void xen_machine_kexec_register_resources(struct resource *res);
++#endif
+ extern asmlinkage long sys_kexec_load(unsigned long entry,
+ unsigned long nr_segments,
+ struct kexec_segment __user *segments,
+Index: head-2007-09-25/include/linux/mm.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/mm.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/mm.h 2007-09-25 14:35:02.000000000 +0200
+@@ -169,6 +169,9 @@ extern unsigned int kobjsize(const void
+ #define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
+ #define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
+ #define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
++#ifdef CONFIG_XEN
++#define VM_FOREIGN 0x08000000 /* Has pages belonging to another VM */
++#endif
+
+ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+@@ -208,6 +211,10 @@ struct vm_operations_struct {
+ /* notification that a previously read-only page is about to become
+ * writable, if an error is returned it will cause a SIGBUS */
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
++ /* Area-specific function for clearing the PTE at @ptep. Returns the
++ * original value of @ptep. */
++ pte_t (*zap_pte)(struct vm_area_struct *vma,
++ unsigned long addr, pte_t *ptep, int is_fullmm);
+ #ifdef CONFIG_NUMA
+ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+ struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+Index: head-2007-09-25/include/linux/page-flags.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/page-flags.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/page-flags.h 2007-09-25 14:35:02.000000000 +0200
+@@ -104,6 +104,8 @@
+ #define PG_uncached 31 /* Page has been mapped as uncached */
+ #endif
+
++#define PG_foreign 20 /* Page is owned by foreign allocator. */
++
+ /*
+ * Manipulation of page state flags
+ */
+@@ -270,6 +272,18 @@ static inline void __ClearPageTail(struc
+ #define SetPageUncached(page) set_bit(PG_uncached, &(page)->flags)
+ #define ClearPageUncached(page) clear_bit(PG_uncached, &(page)->flags)
+
++#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
++#define SetPageForeign(page, dtor) do { \
++ set_bit(PG_foreign, &(page)->flags); \
++ (page)->index = (long)(dtor); \
++} while (0)
++#define ClearPageForeign(page) do { \
++ clear_bit(PG_foreign, &(page)->flags); \
++ (page)->index = 0; \
++} while (0)
++#define PageForeignDestructor(page) \
++ ( (void (*) (struct page *)) (page)->index )(page)
++
+ struct page; /* forward declaration */
+
+ extern void cancel_dirty_page(struct page *page, unsigned int account_size);
+Index: head-2007-09-25/include/linux/skbuff.h
+===================================================================
+--- head-2007-09-25.orig/include/linux/skbuff.h 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/include/linux/skbuff.h 2007-09-25 14:35:02.000000000 +0200
+@@ -212,6 +212,8 @@ typedef unsigned char *sk_buff_data_t;
+ * @local_df: allow local fragmentation
+ * @cloned: Head may be cloned (check refcnt to be sure)
+ * @nohdr: Payload reference only, must not modify header
++ * @proto_data_valid: Protocol data validated since arriving at localhost
++ * @proto_csum_blank: Protocol csum must be added before leaving localhost
+ * @pkt_type: Packet class
+ * @fclone: skbuff clone status
+ * @ip_summed: Driver fed us an IP checksum
+@@ -277,7 +279,13 @@ struct sk_buff {
+ nfctinfo:3;
+ __u8 pkt_type:3,
+ fclone:2,
++#ifndef CONFIG_XEN
+ ipvs_property:1;
++#else
++ ipvs_property:1,
++ proto_data_valid:1,
++ proto_csum_blank:1;
++#endif
+ __be16 protocol;
+
+ void (*destructor)(struct sk_buff *skb);
+Index: head-2007-09-25/kernel/irq/spurious.c
+===================================================================
+--- head-2007-09-25.orig/kernel/irq/spurious.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/kernel/irq/spurious.c 2007-09-25 14:35:02.000000000 +0200
+@@ -172,7 +172,8 @@ void note_interrupt(unsigned int irq, st
+ irqreturn_t action_ret)
+ {
+ if (unlikely(action_ret != IRQ_HANDLED)) {
+- desc->irqs_unhandled++;
++ if (!irq_ignore_unhandled(irq))
++ desc->irqs_unhandled++;
+ if (unlikely(action_ret != IRQ_NONE))
+ report_bad_irq(irq, desc, action_ret);
+ }
+Index: head-2007-09-25/kernel/kexec.c
+===================================================================
+--- head-2007-09-25.orig/kernel/kexec.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/kernel/kexec.c 2007-09-25 14:35:02.000000000 +0200
+@@ -331,13 +331,27 @@ static int kimage_is_destination_range(s
+ return 0;
+ }
+
+-static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
++static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
+ {
+ struct page *pages;
+
+ pages = alloc_pages(gfp_mask, order);
+ if (pages) {
+ unsigned int count, i;
++#ifdef CONFIG_XEN
++ int address_bits;
++
++ if (limit == ~0UL)
++ address_bits = BITS_PER_LONG;
++ else
++ address_bits = long_log2(limit);
++
++ if (xen_create_contiguous_region((unsigned long)page_address(pages),
++ order, address_bits) < 0) {
++ __free_pages(pages, order);
++ return NULL;
++ }
++#endif
+ pages->mapping = NULL;
+ set_page_private(pages, order);
+ count = 1 << order;
+@@ -356,6 +370,9 @@ static void kimage_free_pages(struct pag
+ count = 1 << order;
+ for (i = 0; i < count; i++)
+ ClearPageReserved(page + i);
++#ifdef CONFIG_XEN
++ xen_destroy_contiguous_region((unsigned long)page_address(page), order);
++#endif
+ __free_pages(page, order);
+ }
+
+@@ -401,10 +418,10 @@ static struct page *kimage_alloc_normal_
+ do {
+ unsigned long pfn, epfn, addr, eaddr;
+
+- pages = kimage_alloc_pages(GFP_KERNEL, order);
++ pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
+ if (!pages)
+ break;
+- pfn = page_to_pfn(pages);
++ pfn = kexec_page_to_pfn(pages);
+ epfn = pfn + count;
+ addr = pfn << PAGE_SHIFT;
+ eaddr = epfn << PAGE_SHIFT;
+@@ -438,6 +455,7 @@ static struct page *kimage_alloc_normal_
+ return pages;
+ }
+
++#ifndef CONFIG_XEN
+ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
+ unsigned int order)
+ {
+@@ -491,7 +509,7 @@ static struct page *kimage_alloc_crash_c
+ }
+ /* If I don't overlap any segments I have found my hole! */
+ if (i == image->nr_segments) {
+- pages = pfn_to_page(hole_start >> PAGE_SHIFT);
++ pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
+ break;
+ }
+ }
+@@ -518,6 +536,13 @@ struct page *kimage_alloc_control_pages(
+
+ return pages;
+ }
++#else /* !CONFIG_XEN */
++struct page *kimage_alloc_control_pages(struct kimage *image,
++ unsigned int order)
++{
++ return kimage_alloc_normal_control_pages(image, order);
++}
++#endif
+
+ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
+ {
+@@ -533,7 +558,7 @@ static int kimage_add_entry(struct kimag
+ return -ENOMEM;
+
+ ind_page = page_address(page);
+- *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
++ *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
+ image->entry = ind_page;
+ image->last_entry = ind_page +
+ ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
+@@ -594,13 +619,13 @@ static int kimage_terminate(struct kimag
+ #define for_each_kimage_entry(image, ptr, entry) \
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
+ ptr = (entry & IND_INDIRECTION)? \
+- phys_to_virt((entry & PAGE_MASK)): ptr +1)
++ kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
+
+ static void kimage_free_entry(kimage_entry_t entry)
+ {
+ struct page *page;
+
+- page = pfn_to_page(entry >> PAGE_SHIFT);
++ page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
+ kimage_free_pages(page);
+ }
+
+@@ -612,6 +637,10 @@ static void kimage_free(struct kimage *i
+ if (!image)
+ return;
+
++#ifdef CONFIG_XEN
++ xen_machine_kexec_unload(image);
++#endif
++
+ kimage_free_extra_pages(image);
+ for_each_kimage_entry(image, ptr, entry) {
+ if (entry & IND_INDIRECTION) {
+@@ -687,7 +716,7 @@ static struct page *kimage_alloc_page(st
+ * have a match.
+ */
+ list_for_each_entry(page, &image->dest_pages, lru) {
+- addr = page_to_pfn(page) << PAGE_SHIFT;
++ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+ if (addr == destination) {
+ list_del(&page->lru);
+ return page;
+@@ -698,16 +727,16 @@ static struct page *kimage_alloc_page(st
+ kimage_entry_t *old;
+
+ /* Allocate a page, if we run out of memory give up */
+- page = kimage_alloc_pages(gfp_mask, 0);
++ page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
+ if (!page)
+ return NULL;
+ /* If the page cannot be used file it away */
+- if (page_to_pfn(page) >
++ if (kexec_page_to_pfn(page) >
+ (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
+ list_add(&page->lru, &image->unuseable_pages);
+ continue;
+ }
+- addr = page_to_pfn(page) << PAGE_SHIFT;
++ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+
+ /* If it is the destination page we want use it */
+ if (addr == destination)
+@@ -730,7 +759,7 @@ static struct page *kimage_alloc_page(st
+ struct page *old_page;
+
+ old_addr = *old & PAGE_MASK;
+- old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
++ old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
+ copy_highpage(page, old_page);
+ *old = addr | (*old & ~PAGE_MASK);
+
+@@ -780,7 +809,7 @@ static int kimage_load_normal_segment(st
+ result = -ENOMEM;
+ goto out;
+ }
+- result = kimage_add_page(image, page_to_pfn(page)
++ result = kimage_add_page(image, kexec_page_to_pfn(page)
+ << PAGE_SHIFT);
+ if (result < 0)
+ goto out;
+@@ -812,6 +841,7 @@ out:
+ return result;
+ }
+
++#ifndef CONFIG_XEN
+ static int kimage_load_crash_segment(struct kimage *image,
+ struct kexec_segment *segment)
+ {
+@@ -834,7 +864,7 @@ static int kimage_load_crash_segment(str
+ char *ptr;
+ size_t uchunk, mchunk;
+
+- page = pfn_to_page(maddr >> PAGE_SHIFT);
++ page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
+ if (page == 0) {
+ result = -ENOMEM;
+ goto out;
+@@ -883,6 +913,13 @@ static int kimage_load_segment(struct ki
+
+ return result;
+ }
++#else /* CONFIG_XEN */
++static int kimage_load_segment(struct kimage *image,
++ struct kexec_segment *segment)
++{
++ return kimage_load_normal_segment(image, segment);
++}
++#endif
+
+ /*
+ * Exec Kernel system call: for obvious reasons only root may call it.
+@@ -993,6 +1030,13 @@ asmlinkage long sys_kexec_load(unsigned
+ if (result)
+ goto out;
+ }
++#ifdef CONFIG_XEN
++ if (image) {
++ result = xen_machine_kexec_load(image);
++ if (result)
++ goto out;
++ }
++#endif
+ /* Install the new kernel, and Uninstall the old */
+ image = xchg(dest_image, image);
+
+@@ -1047,7 +1091,6 @@ void crash_kexec(struct pt_regs *regs)
+ {
+ int locked;
+
+-
+ /* Take the kexec_lock here to prevent sys_kexec_load
+ * running on one cpu from replacing the crash kernel
+ * we are using after a panic on a different cpu.
+Index: head-2007-09-25/lib/Makefile
+===================================================================
+--- head-2007-09-25.orig/lib/Makefile 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/lib/Makefile 2007-09-25 14:35:02.000000000 +0200
+@@ -58,6 +58,7 @@ obj-$(CONFIG_SMP) += percpu_counter.o
+ obj-$(CONFIG_AUDIT_GENERIC) += audit.o
+
+ obj-$(CONFIG_SWIOTLB) += swiotlb.o
++swiotlb-$(CONFIG_XEN) := ../arch/i386/kernel/swiotlb.o
+ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
+
+ lib-$(CONFIG_GENERIC_BUG) += bug.o
+Index: head-2007-09-25/mm/highmem.c
+===================================================================
+--- head-2007-09-25.orig/mm/highmem.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/mm/highmem.c 2007-09-25 14:35:02.000000000 +0200
+@@ -158,6 +158,17 @@ start:
+ return vaddr;
+ }
+
++#ifdef CONFIG_XEN
++void kmap_flush_unused(void)
++{
++ spin_lock(&kmap_lock);
++ flush_all_zero_pkmaps();
++ spin_unlock(&kmap_lock);
++}
++
++EXPORT_SYMBOL(kmap_flush_unused);
++#endif
++
+ void fastcall *kmap_high(struct page *page)
+ {
+ unsigned long vaddr;
+Index: head-2007-09-25/mm/memory.c
+===================================================================
+--- head-2007-09-25.orig/mm/memory.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/mm/memory.c 2007-09-25 14:35:02.000000000 +0200
+@@ -404,7 +404,8 @@ struct page *vm_normal_page(struct vm_ar
+ * and that the resulting page looks ok.
+ */
+ if (unlikely(!pfn_valid(pfn))) {
+- print_bad_pte(vma, pte, addr);
++ if (!(vma->vm_flags & VM_RESERVED))
++ print_bad_pte(vma, pte, addr);
+ return NULL;
+ }
+
+@@ -662,8 +663,12 @@ static unsigned long zap_pte_range(struc
+ page->index > details->last_index))
+ continue;
+ }
+- ptent = ptep_get_and_clear_full(mm, addr, pte,
+- tlb->fullmm);
++ if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
++ ptent = vma->vm_ops->zap_pte(vma, addr, pte,
++ tlb->fullmm);
++ else
++ ptent = ptep_get_and_clear_full(mm, addr, pte,
++ tlb->fullmm);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+ if (unlikely(!page))
+ continue;
+@@ -896,6 +901,7 @@ unsigned long zap_page_range(struct vm_a
+ tlb_finish_mmu(tlb, address, end);
+ return end;
+ }
++EXPORT_SYMBOL(zap_page_range);
+
+ /*
+ * Do a quick page-table lookup for a single page.
+@@ -1035,6 +1041,26 @@ int get_user_pages(struct task_struct *t
+ continue;
+ }
+
++#ifdef CONFIG_XEN
++ if (vma && (vma->vm_flags & VM_FOREIGN)) {
++ struct page **map = vma->vm_private_data;
++ int offset = (start - vma->vm_start) >> PAGE_SHIFT;
++ if (map[offset] != NULL) {
++ if (pages) {
++ struct page *page = map[offset];
++
++ pages[i] = page;
++ get_page(page);
++ }
++ if (vmas)
++ vmas[i] = vma;
++ i++;
++ start += PAGE_SIZE;
++ len--;
++ continue;
++ }
++ }
++#endif
+ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ || !(vm_flags & vma->vm_flags))
+ return i ? : -EFAULT;
+Index: head-2007-09-25/mm/page_alloc.c
+===================================================================
+--- head-2007-09-25.orig/mm/page_alloc.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/mm/page_alloc.c 2007-09-25 14:35:02.000000000 +0200
+@@ -206,7 +206,11 @@ static void bad_page(struct page *page)
+ 1 << PG_slab |
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+- 1 << PG_buddy );
++ 1 << PG_buddy |
++#ifdef CONFIG_X86_XEN
++ 1 << PG_pinned |
++#endif
++ 1 << PG_foreign );
+ set_page_count(page, 0);
+ reset_page_mapcount(page);
+ page->mapping = NULL;
+@@ -442,7 +446,11 @@ static inline int free_pages_check(struc
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+ 1 << PG_reserved |
+- 1 << PG_buddy ))))
++ 1 << PG_buddy |
++#ifdef CONFIG_X86_XEN
++ 1 << PG_pinned |
++#endif
++ 1 << PG_foreign ))))
+ bad_page(page);
+ /*
+ * PageReclaim == PageTail. It is only an error
+@@ -504,6 +512,12 @@ static void __free_pages_ok(struct page
+ int i;
+ int reserved = 0;
+
++#ifdef CONFIG_XEN
++ if (PageForeign(page)) {
++ PageForeignDestructor(page);
++ return;
++ }
++#endif
+ for (i = 0 ; i < (1 << order) ; ++i)
+ reserved += free_pages_check(page + i);
+ if (reserved)
+@@ -598,7 +612,11 @@ static int prep_new_page(struct page *pa
+ 1 << PG_swapcache |
+ 1 << PG_writeback |
+ 1 << PG_reserved |
+- 1 << PG_buddy ))))
++ 1 << PG_buddy |
++#ifdef CONFIG_X86_XEN
++ 1 << PG_pinned |
++#endif
++ 1 << PG_foreign ))))
+ bad_page(page);
+
+ /*
+@@ -781,6 +799,12 @@ static void fastcall free_hot_cold_page(
+ struct per_cpu_pages *pcp;
+ unsigned long flags;
+
++#ifdef CONFIG_XEN
++ if (PageForeign(page)) {
++ PageForeignDestructor(page);
++ return;
++ }
++#endif
+ if (PageAnon(page))
+ page->mapping = NULL;
+ if (free_pages_check(page))
+Index: head-2007-09-25/net/core/dev.c
+===================================================================
+--- head-2007-09-25.orig/net/core/dev.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/net/core/dev.c 2007-09-25 14:35:02.000000000 +0200
+@@ -118,6 +118,12 @@
+ #include <linux/ctype.h>
+ #include <linux/if_arp.h>
+
++#ifdef CONFIG_XEN
++#include <net/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#endif
++
+ /*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+@@ -1456,6 +1462,43 @@ out_kfree_skb:
+ } \
+ }
+
++#ifdef CONFIG_XEN
++inline int skb_checksum_setup(struct sk_buff *skb)
++{
++ if (skb->proto_csum_blank) {
++ if (skb->protocol != htons(ETH_P_IP))
++ goto out;
++ skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
++ if (skb->h.raw >= skb->tail)
++ goto out;
++ switch (skb->nh.iph->protocol) {
++ case IPPROTO_TCP:
++ skb->csum = offsetof(struct tcphdr, check);
++ break;
++ case IPPROTO_UDP:
++ skb->csum = offsetof(struct udphdr, check);
++ break;
++ default:
++ if (net_ratelimit())
++ printk(KERN_ERR "Attempting to checksum a non-"
++ "TCP/UDP packet, dropping a protocol"
++ " %d packet", skb->nh.iph->protocol);
++ goto out;
++ }
++ if ((skb->h.raw + skb->csum + 2) > skb->tail)
++ goto out;
++ skb->ip_summed = CHECKSUM_HW;
++ skb->proto_csum_blank = 0;
++ }
++ return 0;
++out:
++ return -EPROTO;
++}
++#else
++inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
++#endif
++
++
+ /**
+ * dev_queue_xmit - transmit a buffer
+ * @skb: buffer to transmit
+@@ -1488,6 +1531,12 @@ int dev_queue_xmit(struct sk_buff *skb)
+ struct Qdisc *q;
+ int rc = -ENOMEM;
+
++ /* If a checksum-deferred packet is forwarded to a device that needs a
++ * checksum, correct the pointers and force checksumming.
++ */
++ if (skb_checksum_setup(skb))
++ goto out_kfree_skb;
++
+ /* GSO will handle the following emulations directly. */
+ if (netif_needs_gso(dev, skb))
+ goto gso;
+@@ -1874,6 +1923,19 @@ int netif_receive_skb(struct sk_buff *sk
+ }
+ #endif
+
++#ifdef CONFIG_XEN
++ switch (skb->ip_summed) {
++ case CHECKSUM_UNNECESSARY:
++ skb->proto_data_valid = 1;
++ break;
++ case CHECKSUM_HW:
++ /* XXX Implement me. */
++ default:
++ skb->proto_data_valid = 0;
++ break;
++ }
++#endif
++
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ if (!ptype->dev || ptype->dev == skb->dev) {
+ if (pt_prev)
+@@ -3778,6 +3840,7 @@ EXPORT_SYMBOL(unregister_netdevice_notif
+ EXPORT_SYMBOL(net_enable_timestamp);
+ EXPORT_SYMBOL(net_disable_timestamp);
+ EXPORT_SYMBOL(dev_get_flags);
++EXPORT_SYMBOL(skb_checksum_setup);
+
+ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+ EXPORT_SYMBOL(br_handle_frame_hook);
+Index: head-2007-09-25/net/core/skbuff.c
+===================================================================
+--- head-2007-09-25.orig/net/core/skbuff.c 2007-09-25 14:22:36.000000000 +0200
++++ head-2007-09-25/net/core/skbuff.c 2007-09-25 14:35:02.000000000 +0200
+@@ -416,6 +416,10 @@ struct sk_buff *skb_clone(struct sk_buff
+ C(local_df);
+ n->cloned = 1;
+ n->nohdr = 0;
++#ifdef CONFIG_XEN
++ C(proto_data_valid);
++ C(proto_csum_blank);
++#endif
+ C(pkt_type);
+ C(ip_summed);
+ C(priority);